Merge branch 'dev' into feature/share-review-timestamp

This commit is contained in:
Otto 2026-04-17 17:35:28 +03:00 committed by GitHub
commit d1c864f1de
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
88 changed files with 9947 additions and 1060 deletions

View File

@ -14,6 +14,8 @@ services:
dockerfile: docker/main/Dockerfile dockerfile: docker/main/Dockerfile
# Use target devcontainer-trt for TensorRT dev # Use target devcontainer-trt for TensorRT dev
target: devcontainer target: devcontainer
cache_from:
- ghcr.io/blakeblackshear/frigate:cache-amd64
## Uncomment this block for nvidia gpu support ## Uncomment this block for nvidia gpu support
# deploy: # deploy:
# resources: # resources:

View File

@ -52,6 +52,14 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=cache,target=/root/.ccache \ --mount=type=cache,target=/root/.ccache \
/deps/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
# Build intel-media-driver from source against bookworm's system libva so it
# works with Debian 12's glibc/libstdc++ (pre-built noble/trixie packages
# require glibc 2.38 which is not available on bookworm).
FROM base AS intel-media-driver
ARG DEBIAN_FRONTEND
RUN --mount=type=bind,source=docker/main/build_intel_media_driver.sh,target=/deps/build_intel_media_driver.sh \
/deps/build_intel_media_driver.sh
FROM scratch AS go2rtc FROM scratch AS go2rtc
ARG TARGETARCH ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin WORKDIR /rootfs/usr/local/go2rtc/bin
@ -200,6 +208,7 @@ RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install
FROM scratch AS deps-rootfs FROM scratch AS deps-rootfs
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/ COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/ COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
COPY --from=intel-media-driver /rootfs/ /
COPY --from=go2rtc /rootfs/ / COPY --from=go2rtc /rootfs/ /
COPY --from=libusb-build /usr/local/lib /usr/local/lib COPY --from=libusb-build /usr/local/lib /usr/local/lib
COPY --from=tempio /rootfs/ / COPY --from=tempio /rootfs/ /

View File

@ -0,0 +1,48 @@
#!/bin/bash
# Build intel-media-driver (iHD VA-API driver) from source and stage it under
# /rootfs so a downstream `COPY --from=... /rootfs/ /` can pick it up.
set -euxo pipefail

# Intel media driver is x86_64-only. Create empty rootfs on other arches so
# the downstream COPY --from has a valid source.
if [ "$(uname -m)" != "x86_64" ]; then
    mkdir -p /rootfs
    exit 0
fi

# Pinned upstream release tags; bump these together — each media-driver
# release documents the matching gmmlib version it was validated against.
MEDIA_DRIVER_VERSION="intel-media-25.2.6"
GMMLIB_VERSION="intel-gmmlib-22.7.2"

# Build-stage-only tooling. --no-install-recommends keeps the throwaway
# build stage lean; everything here stays out of the final image anyway
# because only /rootfs is copied onward.
apt-get -qq update
apt-get -qq install -y --no-install-recommends \
    ca-certificates \
    cmake \
    g++ \
    gnupg \
    make \
    pkg-config \
    wget

# Use Intel's jammy repo for newer libva-dev (2.22) which provides the
# VVC/VVC-decode headers required by media-driver 25.x
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" > /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
apt-get -qq install -y --no-install-recommends libva-dev

# Build gmmlib (required by media-driver). It installs to the default
# /usr/local prefix so media-driver's pkg-config lookup finds igdgmm.
wget -qO gmmlib.tar.gz "https://github.com/intel/gmmlib/archive/refs/tags/${GMMLIB_VERSION}.tar.gz"
mkdir -p /tmp/gmmlib
tar -xf gmmlib.tar.gz -C /tmp/gmmlib --strip-components 1
rm -f gmmlib.tar.gz
cmake -S /tmp/gmmlib -B /tmp/gmmlib/build -DCMAKE_BUILD_TYPE=Release
make -C /tmp/gmmlib/build -j"$(nproc)"
make -C /tmp/gmmlib/build install

# Build intel-media-driver. -Wno-error: upstream builds with -Werror and
# newer toolchains emit warnings that would otherwise abort the build.
wget -qO media-driver.tar.gz "https://github.com/intel/media-driver/archive/refs/tags/${MEDIA_DRIVER_VERSION}.tar.gz"
mkdir -p /tmp/media-driver
tar -xf media-driver.tar.gz -C /tmp/media-driver --strip-components 1
rm -f media-driver.tar.gz
cmake -S /tmp/media-driver -B /tmp/media-driver/build \
    -DCMAKE_BUILD_TYPE=Release \
    -DENABLE_KERNELS=ON \
    -DENABLE_NONFREE_KERNELS=ON \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DCMAKE_INSTALL_LIBDIR=/usr/lib/x86_64-linux-gnu \
    -DCMAKE_C_FLAGS="-Wno-error" \
    -DCMAKE_CXX_FLAGS="-Wno-error"
make -C /tmp/media-driver/build -j"$(nproc)"

# Install driver to rootfs for COPY --from
make -C /tmp/media-driver/build install DESTDIR=/rootfs

View File

@ -91,8 +91,10 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update apt-get -qq update
# intel-media-va-driver-non-free is built from source in the
# intel-media-driver Dockerfile stage for Battlemage (Xe2) support
apt-get -qq install --no-install-recommends --no-install-suggests -y \ apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 libmfx1 libmfxgen1 libvpl2
apt-get -qq install -y ocl-icd-libopencl1 apt-get -qq install -y ocl-icd-libopencl1

View File

@ -119,6 +119,12 @@ audio:
Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background. Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background.
:::info
Audio transcription requires a one-time internet connection to download the Whisper or Sherpa-ONNX model on first use. Once cached, transcription runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service. Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service.
#### Configuration #### Configuration

View File

@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath";
Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
:::info
Bird classification requires a one-time internet connection to download the classification model and label map from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
Bird classification runs a lightweight tflite model on the CPU, there are no significantly different system requirements than running Frigate itself. Bird classification runs a lightweight tflite model on the CPU, there are no significantly different system requirements than running Frigate itself.

View File

@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath";
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API. Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API.
:::info
Training a custom object classification model requires a one-time internet connection to download MobileNetV2 base weights. Once trained, the model runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
Object classification models are lightweight and run very fast on CPU. Object classification models are lightweight and run very fast on CPU.

View File

@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath";
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration. State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration.
:::info
Training a custom state classification model requires a one-time internet connection to download MobileNetV2 base weights. Once trained, the model runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
State classification models are lightweight and run very fast on CPU. State classification models are lightweight and run very fast on CPU.

View File

@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath";
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
:::info
Face recognition requires a one-time internet connection to download detection and embedding models from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Model Requirements ## Model Requirements
### Face Detection ### Face Detection

View File

@ -193,6 +193,12 @@ To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` env
Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers. Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers.
:::info
Cloud Generative AI providers require an active internet connection to send images and prompts for processing. Local providers like llama.cpp and Ollama (with local models) do not require internet. See [Network Requirements](/frigate/network_requirements#generative-ai) for details.
:::
### Ollama Cloud ### Ollama Cloud
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud). Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).

View File

@ -59,13 +59,14 @@ Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video
**Recommended hwaccel Preset** **Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes | | CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------- | ------------------------------------------- | | ------------------ | ------------ | ------------------- | ------------------------------------------- |
| gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 | | gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 |
| gen6 - gen7 | iHD | preset-vaapi | qsv is not supported | | gen6 - gen7 | iHD | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used | | gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-\* | | | gen13+ | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-\* | | | Intel Arc A-series | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc B-series | iHD / Xe | preset-intel-qsv-\* | Requires host kernel 6.12+ |
::: :::

View File

@ -11,6 +11,12 @@ Frigate can recognize license plates on vehicles and automatically add the detec
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition. LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition.
:::info
License plate recognition requires a one-time internet connection to download OCR and detection models from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
When a plate is recognized, the details are: When a plate is recognized, the details are:
- Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object. - Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object.

View File

@ -21,6 +21,12 @@ The jsmpeg live view will use more browser and client GPU resources. Using go2rt
| mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. | | mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. |
| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. | | webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
:::info
WebRTC may use an external STUN server for NAT traversal. MSE and HLS streaming do not require any internet access. See [Network Requirements](/frigate/network_requirements#webrtc-stun) for details.
:::
### Camera Settings Recommendations ### Camera Settings Recommendations
If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view: If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view:

View File

@ -11,6 +11,12 @@ import NavPath from "@site/src/components/NavPath";
Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption. Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption.
:::info
Push notifications require internet access from the Frigate server to the browser vendor's push service (e.g., Google FCM, Mozilla autopush). See [Network Requirements](/frigate/network_requirements#push-notifications) for details.
:::
## Setting up Notifications ## Setting up Notifications
In order to use notifications the following requirements must be met: In order to use notifications the following requirements must be met:

View File

@ -288,6 +288,12 @@ This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleratio
See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware. See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware.
:::info
If no custom model is provided, the Hailo detector downloads a default model from the Hailo Model Zoo on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details.
:::
### Configuration ### Configuration
When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**. When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**.
@ -1793,6 +1799,12 @@ Hardware accelerated object detection is supported on the following SoCs:
This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2. This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2.
:::info
If no custom model is provided, the RKNN detector downloads a default model from GitHub on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details.
:::
:::tip :::tip
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be: When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be:
@ -2176,6 +2188,12 @@ This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AX
See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware. See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
:::info
The AXEngine detector downloads its default model from HuggingFace on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details.
:::
### Configuration ### Configuration
When configuring the AXEngine detector, you have to specify the model name. When configuring the AXEngine detector, you have to specify the model name.

View File

@ -281,31 +281,52 @@ Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only reco
Footage can be exported from Frigate by right-clicking (desktop) or long pressing (mobile) on a review item in the Review pane or by clicking the Export button in the History view. Exported footage is then organized and searchable through the Export view, accessible from the main navigation bar. Footage can be exported from Frigate by right-clicking (desktop) or long pressing (mobile) on a review item in the Review pane or by clicking the Export button in the History view. Exported footage is then organized and searchable through the Export view, accessible from the main navigation bar.
### Time-lapse export ### Custom export with FFmpeg arguments
Time lapse exporting is available only via the [HTTP API](../integrations/api/export-recording-export-camera-name-start-start-time-end-end-time-post.api.mdx). For advanced use cases, the [custom export HTTP API](../integrations/api/export-recording-custom-export-custom-camera-name-start-start-time-end-end-time-post.api.mdx) lets you pass custom FFmpeg arguments when exporting a recording:
When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS. ```
POST /export/custom/{camera_name}/start/{start_time}/end/{end_time}
To configure the speed-up factor, the frame rate and further custom settings, use the `timelapse_args` parameter. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:
```yaml {3-4}
record:
enabled: True
export:
timelapse_args: "-vf setpts=PTS/60 -r 25"
``` ```
:::tip The request body accepts `ffmpeg_input_args` and `ffmpeg_output_args` to control encoding, frame rate, filters, and other FFmpeg options. If neither is provided, Frigate defaults to time-lapse output settings (25x speed, 30 FPS).
When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set the camera-level export hwaccel_args with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). The following example exports a time-lapse at 60x speed with 25 FPS:
```json
{
"name": "Front Door Time-lapse",
"ffmpeg_output_args": "-vf setpts=PTS/60 -r 25"
}
```
#### CPU fallback
If hardware acceleration is configured and the export fails (e.g., the GPU is unavailable), set `cpu_fallback: true` in the request body to automatically retry using software encoding.
```json
{
"name": "My Export",
"ffmpeg_output_args": "-c:v libx264 -crf 23",
"cpu_fallback": true
}
```
:::note
Non-admin users are restricted from using FFmpeg arguments that can access the filesystem (e.g., `-filter_complex`, file paths, and protocol references). Admin users have full control over FFmpeg arguments.
::: :::
:::tip :::tip
The encoder determines its own behavior so the resulting file size may be undesirably large. When `hwaccel_args` is configured, hardware encoding is used for exports. This can be overridden per camera (e.g., when camera resolution exceeds hardware encoder limits) by setting a camera-level `hwaccel_args`. Using an unrecognized value or empty string falls back to software encoding (libx264).
To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.
:::
:::tip
To reduce output file size, add the FFmpeg parameter `-qp n` to `ffmpeg_output_args` (where `n` is the quantization parameter). Adjust the value to balance quality and file size for your scenario.
::: :::

View File

@ -13,6 +13,12 @@ Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and
Semantic Search is accessed via the _Explore_ view in the Frigate UI. Semantic Search is accessed via the _Explore_ view in the Frigate UI.
:::info
Semantic search requires a one-time internet connection to download embedding models from HuggingFace. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details.
:::
## Minimum System Requirements ## Minimum System Requirements
Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all. Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all.

View File

@ -146,17 +146,11 @@ A single Coral can handle many cameras using the default model and will be suffi
The OpenVINO detector type is able to run on: The OpenVINO detector type is able to run on:
- 6th Gen Intel Platforms and newer that have an iGPU - 6th Gen Intel Platforms and newer that have an iGPU
- x86 hosts with an Intel Arc GPU - x86 hosts with an Intel Arc GPU (including Arc A-series and B-series Battlemage)
- Intel NPUs - Intel NPUs
- Most modern AMD CPUs (though this is officially not supported by Intel) - Most modern AMD CPUs (though this is officially not supported by Intel)
- x86 & Arm64 hosts via CPU (generally not recommended) - x86 & Arm64 hosts via CPU (generally not recommended)
:::note
Intel B-series (Battlemage) GPUs are not officially supported with Frigate 0.17, though a user has [provided steps to rebuild the Frigate container](https://github.com/blakeblackshear/frigate/discussions/21257) with support for them.
:::
More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)
Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below:

View File

@ -482,7 +482,8 @@ services:
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
- /dev/video11:/dev/video11 # For Raspberry Pi 4B - /dev/video11:/dev/video11 # For Raspberry Pi 4B
- /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware - /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware
- /dev/accel:/dev/accel # Intel NPU - /dev/kfd:/dev/kfd # AMD Kernel Fusion Driver for ROCm
- /dev/accel:/dev/accel # AMD / Intel NPU
volumes: volumes:
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config - /path/to/your/config:/config

View File

@ -0,0 +1,155 @@
---
id: network_requirements
title: Network Requirements
---
# Network Requirements
Frigate is designed to run locally and does not require a persistent internet connection for core functionality. However, certain features need internet access for initial setup or ongoing operation. This page describes what connects to the internet, when, and how to control it.
## How Frigate Uses the Internet
Frigate's internet usage falls into three categories:
1. **One-time model downloads** — ML models are downloaded the first time a feature is enabled, then cached locally. No internet is needed on subsequent startups.
2. **Optional cloud services** — Features like Frigate+ and Generative AI connect to external APIs only when explicitly configured.
3. **Build-time dependencies** — Components bundled into the Docker image during the build process. These require no internet at runtime.
:::tip
After initial setup, Frigate can run fully offline as long as all required models have been downloaded and no cloud-dependent features are enabled.
:::
## One-Time Model Downloads
The following models are downloaded automatically the first time their associated feature is enabled. Once cached in `/config/model_cache/`, they do not require internet again.
| Feature | Models Downloaded | Source |
| --------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------- |
| [Semantic search](/configuration/semantic_search) | Jina CLIP v1 or v2 (ONNX) + tokenizer | HuggingFace |
| [Face recognition](/configuration/face_recognition) | FaceNet, ArcFace, face detection model | GitHub |
| [License plate recognition](/configuration/license_plate_recognition) | PaddleOCR (detection, classification, recognition) + YOLOv9 plate detector | GitHub |
| [Bird classification](/configuration/bird_classification) | MobileNetV2 bird model + label map | GitHub |
| [Custom classification](/configuration/custom_classification/state_classification) (training) | MobileNetV2 ImageNet base weights (via Keras) | Google storage |
| [Audio transcription](/configuration/advanced) | Whisper or Sherpa-ONNX streaming model | HuggingFace / OpenAI |
### Hardware-Specific Detector Models
If you are using one of the following hardware detectors and have not provided your own model file, a default model will be downloaded on first startup:
| Detector | Model Downloaded | Source |
| ------------------------------------------------------------------ | -------------------- | ------------------------ |
| [Rockchip RKNN](/configuration/object_detectors#rockchip-platform) | RKNN detection model | GitHub |
| [Hailo 8 / 8L](/configuration/object_detectors#hailo-8) | YOLOv6n (.hef) | Hailo Model Zoo (AWS S3) |
| [AXERA AXEngine](/configuration/object_detectors) | Detection model | HuggingFace |
:::note
The default CPU, EdgeTPU, and OpenVINO object detection models are bundled into the Docker image and do not require any download at runtime.
:::
### Preventing Model Downloads
If you have already downloaded all required models and want to prevent Frigate from attempting any outbound connections to HuggingFace or the Transformers library, set the following environment variables on your Frigate container:
```yaml
environment:
HF_HUB_OFFLINE: "1"
TRANSFORMERS_OFFLINE: "1"
```
:::warning
Setting these variables without having the correct model files already cached in `/config/model_cache/` will cause failures. Only use these after a successful initial setup with internet access.
:::
### Mirror Support
If your Frigate instance has restricted internet access, you can point model downloads at internal mirrors using environment variables:
| Environment Variable | Default | Used By |
| ----------------------------------- | ----------------------------------- | --------------------------------------------- |
| `HF_ENDPOINT` | `https://huggingface.co` | Semantic search, Sherpa-ONNX, AXEngine models |
| `GITHUB_ENDPOINT` | `https://github.com` | Face recognition, LPR, RKNN models |
| `GITHUB_RAW_ENDPOINT` | `https://raw.githubusercontent.com` | Bird classification |
| `TF_KERAS_MOBILENET_V2_WEIGHTS_URL` | Google storage (Keras default) | Custom classification training |
## Optional Cloud Services
These features connect to external services during normal operation and require internet whenever they are active.
### Frigate+
When a Frigate+ API key is configured, Frigate communicates with `https://api.frigate.video` to download models, upload snapshots for training, submit annotations, and report false positives. Remove the API key to disable all Frigate+ network activity.
See [Frigate+](/integrations/plus) for details.
### Generative AI
When a Generative AI provider is configured, Frigate sends images and prompts to the configured provider for event descriptions, chat, and camera monitoring. Available providers:
| Provider | Internet Required |
| ------------- | ---------------------------------------------------------------- |
| OpenAI | Yes — connects to OpenAI API (or custom base URL) |
| Google Gemini | Yes — connects to Google Generative AI API |
| Azure OpenAI | Yes — connects to your Azure endpoint |
| Ollama | Depends — typically local (`localhost:11434`), but can be remote |
| llama.cpp | No — runs entirely locally |
Disable Generative AI by removing the `genai` configuration from your cameras. See [Generative AI](/configuration/genai/genai_config) for details.
### Version Check
Frigate checks GitHub for the latest release version on startup by querying `https://api.github.com`. This can be disabled:
```yaml
telemetry:
version_check: false
```
### Push Notifications
When [notifications](/configuration/notifications) are enabled and users have registered for push notifications in the web UI, Frigate sends push messages through the browser vendor's push service (e.g., Google FCM, Mozilla autopush). This requires internet access from the Frigate server to these push endpoints.
### MQTT
If an [MQTT broker](/integrations/mqtt) is configured, Frigate maintains a connection to the broker's host and port. This is typically a local network connection, but will require internet if you use a cloud-hosted MQTT broker.
### DeepStack / CodeProject.AI
When using the [DeepStack detector plugin](/configuration/object_detectors), Frigate sends images to the configured API endpoint for inference. This is typically local but depends on where the service is hosted.
## WebRTC (STUN)
For [WebRTC live streaming](/configuration/live), Frigate uses STUN for NAT traversal:
- **go2rtc** defaults to a local STUN listener (`stun:8555`) — no internet required.
- **The web UI's WebRTC player** includes a fallback to Google's public STUN server (`stun:stun.l.google.com:19302`), which requires internet.
## Home Assistant Supervisor
When running as a Home Assistant add-on, the go2rtc startup script queries the local Supervisor API (`http://supervisor/`) to discover the host IP address and WebRTC port. This is a local network call to the Home Assistant host, not an internet connection.
## What Does NOT Require Internet
- **Object detection** — CPU, EdgeTPU, OpenVINO, and other bundled detector models are included in the Docker image.
- **Recording and playback** — All video is stored and served locally.
- **Live streaming** — Camera streams are pulled over your local network. MSE and HLS streaming work without any external connections.
- **The web interface** — Fully self-contained with no external fonts, scripts, analytics, or CDN dependencies. All translations are bundled locally.
- **Custom classification inference** — After training, custom models run entirely locally.
- **Audio detection** — The YAMNet audio classification model is bundled in the Docker image.
## Running Frigate Offline
To run Frigate in an air-gapped or offline environment:
1. **Pre-download models** — Start Frigate with internet access once with all desired features enabled. Models will be cached in `/config/model_cache/`.
2. **Disable version check** — Set `telemetry.version_check: false` in your configuration.
3. **Block outbound model requests** — Set the `HF_HUB_OFFLINE=1` and `TRANSFORMERS_OFFLINE=1` environment variables to prevent HuggingFace and Transformers from attempting any network requests.
4. **Avoid cloud features** — Do not configure Frigate+, Generative AI providers that require internet, or cloud MQTT brokers.
5. **Use local model mirrors** — If limited internet is available, set the `HF_ENDPOINT`, `GITHUB_ENDPOINT`, and `GITHUB_RAW_ENDPOINT` environment variables to point to local mirrors.
After these steps, Frigate will operate with no outbound internet connections.

View File

@ -5,6 +5,12 @@ title: MQTT
These are the MQTT messages generated by Frigate. The default topic_prefix is `frigate`, but can be changed in the config file. These are the MQTT messages generated by Frigate. The default topic_prefix is `frigate`, but can be changed in the config file.
:::info
MQTT requires a network connection to your broker. This is typically local, but will require internet if using a cloud-hosted MQTT broker. See [Network Requirements](/frigate/network_requirements#mqtt) for details.
:::
## General Frigate Topics ## General Frigate Topics
### `frigate/available` ### `frigate/available`

View File

@ -5,6 +5,12 @@ title: Frigate+
For more information about how to use Frigate+ to improve your model, see the [Frigate+ docs](/plus/). For more information about how to use Frigate+ to improve your model, see the [Frigate+ docs](/plus/).
:::info
Frigate+ requires an active internet connection to communicate with `https://api.frigate.video` for model downloads, image uploads, and annotations. See [Network Requirements](/frigate/network_requirements#frigate) for details.
:::
## Setup ## Setup
### Create an account ### Create an account

View File

@ -80,3 +80,85 @@ Some users found that mounting a drive via `fstab` with the `sync` option caused
#### Copy Times < 1 second #### Copy Times < 1 second
If the storage is working quickly then this error may be caused by CPU load on the machine being too high for Frigate to have the resources to keep up. Try temporarily shutting down other services to see if the issue improves. If the storage is working quickly then this error may be caused by CPU load on the machine being too high for Frigate to have the resources to keep up. Try temporarily shutting down other services to see if the issue improves.
## I see the message: WARNING : Too many unprocessed recording segments in cache for camera. This likely indicates an issue with the detect stream...
This warning means that the detect stream for the affected camera has fallen behind or stopped processing frames. Frigate's recording cache holds segments waiting to be analyzed by the detector — when more than 6 segments pile up without being processed, Frigate discards the oldest ones to prevent the cache from filling up.
:::warning
This error is a **symptom**, not the root cause. The actual cause is always logged **before** these messages start appearing. You must review the full logs from Frigate startup through the first occurrence of this warning to identify the real issue.
:::
### Step 1: Get the full logs
Collect complete Frigate logs from startup through the first occurrence of the error. Look for errors or warnings that appear **before** the "Too many unprocessed" messages begin — that is where the root cause will be found.
### Step 2: Check the cache directory
Exec into the Frigate container and inspect the recording cache:
```
docker exec -it frigate ls -la /tmp/cache
```
Each camera should have a small number of `.mp4` segment files. If one camera has significantly more files than others, that camera is the source of the problem. A problem with a single camera can cascade and cause all cameras to show this error.
### Step 3: Verify segment duration
Recording segments should be approximately 10 seconds long. Run `ffprobe` on segments in the cache to check:
```
docker exec -it frigate ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1 /tmp/cache/<camera>@<segment>.mp4
```
If segments are only ~1 second instead of ~10 seconds, the camera is sending corrupt timestamp data, causing segments to be split too frequently and filling the cache 10x faster than expected.
**Common causes of short segments:**
- **"Smart Codec" or "Smart+" enabled on the camera** — These features dynamically change encoding parameters mid-stream, which corrupts timestamps. Disable them in your camera's settings.
- **Changing codec, bitrate, or resolution mid-stream** — Any encoding changes during an active stream can cause unpredictable segment splitting.
- **Camera firmware bugs** — Check for firmware updates from your camera manufacturer.
### Step 4: Check for a stuck detector
If the detect stream is not processing frames, segments will accumulate. Common causes:
- **Detection resolution too high** — Use a substream for detection, not the full resolution main stream.
- **Detection FPS too high** — 5 fps is the recommended maximum for detection.
- **Model too large** — Use smaller model variants (e.g., YOLO `s` or `t` size, not `e` or `x`). Use 320x320 input size rather than 640x640 unless you have a powerful dedicated detector.
- **Virtualization** — Running Frigate in a VM (especially Proxmox) can cause the detector to hang or stall. This is a known issue with GPU/TPU passthrough in virtualized environments and is not something Frigate can fix. Running Frigate in Docker on bare metal is recommended.
### Step 5: Check for GPU hangs
On the host machine, check `dmesg` for GPU-related errors:
```
dmesg | grep -i -E "gpu|drm|reset|hang"
```
Messages like `trying reset from guc_exec_queue_timedout_job` or similar GPU reset/hang messages indicate a driver or hardware issue. Ensure your kernel and GPU drivers (especially Intel) are up to date.
### Step 6: Verify hardware acceleration configuration
An incorrect `hwaccel_args` preset can cause ffmpeg to fail silently or consume excessive CPU, starving the detector of resources.
- After upgrading Frigate, verify your preset matches your hardware (e.g., `preset-intel-qsv-h264` instead of the deprecated `preset-vaapi`).
- For h265 cameras, use the corresponding h265 preset (e.g., `preset-intel-qsv-h265`).
- Note that `hwaccel_args` are only relevant for the detect stream — Frigate does not decode the record stream.
### Step 7: Verify go2rtc stream configuration
Ensure that the ffmpeg source names in your go2rtc configuration match the correct camera stream. A misconfigured stream name (e.g., copying a config from one camera to another without updating the stream reference) will cause the wrong stream to be used or the stream to fail entirely.
### Step 8: Check system resources
If none of the above apply, the issue may be a general resource constraint. Monitor the following on your host:
- **CPU usage** — An overloaded CPU can prevent the detector from keeping up.
- **RAM and swap** — Excessive swapping dramatically slows all I/O operations.
- **Disk I/O** — Use `iotop` or `iostat` to check for saturation.
- **Storage space** — Verify you have free space on the Frigate storage volume (check the Storage page in the Frigate UI).
Try temporarily disabling resource-intensive features like `genai` and `face_recognition` to see if the issue resolves. This can help isolate whether the detector is being starved of resources.

View File

@ -12,6 +12,7 @@ const sidebars: SidebarsConfig = {
"frigate/updating", "frigate/updating",
"frigate/camera_setup", "frigate/camera_setup",
"frigate/video_pipeline", "frigate/video_pipeline",
"frigate/network_requirements",
"frigate/glossary", "frigate/glossary",
], ],
Guides: [ Guides: [

View File

@ -2724,6 +2724,135 @@ paths:
application/json: application/json:
schema: schema:
$ref: "#/components/schemas/HTTPValidationError" $ref: "#/components/schemas/HTTPValidationError"
/exports/batch:
post:
tags:
- Export
summary: Start recording export batch
description: >-
Starts recording exports for a batch of items, each with its own camera
and time range. Optionally assigns them to a new or existing export case.
When neither export_case_id nor new_case_name is provided, exports are
added as uncategorized. Attaching to an existing case is admin-only.
operationId: export_recordings_batch_exports_batch_post
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/BatchExportBody"
responses:
"202":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/BatchExportResponse"
"400":
description: Bad Request
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"403":
description: Forbidden
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"404":
description: Not Found
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"503":
description: Service Unavailable
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/exports/delete:
post:
tags:
- Export
summary: Bulk delete exports
description: >-
Deletes one or more exports by ID. All IDs must exist and none can be
in-progress. Admin-only.
operationId: bulk_delete_exports_exports_delete_post
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ExportBulkDeleteBody"
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"400":
description: Bad Request - one or more exports are in-progress
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"404":
description: Not Found - one or more export IDs do not exist
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/exports/reassign:
post:
tags:
- Export
summary: Bulk reassign exports to a case
description: >-
Assigns or unassigns one or more exports to/from a case. All IDs must
exist. Pass export_case_id as null to unassign (move to uncategorized).
Admin-only.
operationId: bulk_reassign_exports_exports_reassign_post
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ExportBulkReassignBody"
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"404":
description: Not Found - one or more export IDs or the target case do not exist
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
/cases: /cases:
get: get:
tags: tags:
@ -2853,39 +2982,6 @@ paths:
application/json: application/json:
schema: schema:
$ref: "#/components/schemas/HTTPValidationError" $ref: "#/components/schemas/HTTPValidationError"
"/export/{export_id}/case":
patch:
tags:
- Export
summary: Assign export to case
description: "Assigns an export to a case, or unassigns it if export_case_id is null."
operationId: assign_export_case_export__export_id__case_patch
parameters:
- name: export_id
in: path
required: true
schema:
type: string
title: Export Id
requestBody:
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ExportCaseAssignBody"
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
"/export/{camera_name}/start/{start_time}/end/{end_time}": "/export/{camera_name}/start/{start_time}/end/{end_time}":
post: post:
tags: tags:
@ -2973,32 +3069,6 @@ paths:
application/json: application/json:
schema: schema:
$ref: "#/components/schemas/HTTPValidationError" $ref: "#/components/schemas/HTTPValidationError"
"/export/{event_id}":
delete:
tags:
- Export
summary: Delete export
operationId: export_delete_export__event_id__delete
parameters:
- name: event_id
in: path
required: true
schema:
type: string
title: Event Id
responses:
"200":
description: Successful Response
content:
application/json:
schema:
$ref: "#/components/schemas/GenericResponse"
"422":
description: Validation Error
content:
application/json:
schema:
$ref: "#/components/schemas/HTTPValidationError"
"/export/custom/{camera_name}/start/{start_time}/end/{end_time}": "/export/custom/{camera_name}/start/{start_time}/end/{end_time}":
post: post:
tags: tags:
@ -6501,6 +6571,149 @@ components:
required: required:
- recognizedLicensePlate - recognizedLicensePlate
title: EventsLPRBody title: EventsLPRBody
BatchExportBody:
properties:
items:
items:
$ref: "#/components/schemas/BatchExportItem"
type: array
minItems: 1
maxItems: 50
title: Items
description: List of export items. Each item has its own camera and time range.
export_case_id:
anyOf:
- type: string
maxLength: 30
- type: "null"
title: Export case ID
description: Existing export case ID to assign all exports to. Attaching to an existing case is temporarily admin-only until case-level ACLs exist.
new_case_name:
anyOf:
- type: string
maxLength: 100
- type: "null"
title: New case name
description: Name of a new export case to create when export_case_id is omitted
new_case_description:
anyOf:
- type: string
- type: "null"
title: New case description
description: Optional description for a newly created export case
type: object
required:
- items
title: BatchExportBody
BatchExportItem:
properties:
camera:
type: string
title: Camera name
start_time:
type: number
title: Start time
end_time:
type: number
title: End time
image_path:
anyOf:
- type: string
- type: "null"
title: Existing thumbnail path
description: Optional existing image to use as the export thumbnail
friendly_name:
anyOf:
- type: string
maxLength: 256
- type: "null"
title: Friendly name
description: Optional friendly name for this specific export item
client_item_id:
anyOf:
- type: string
maxLength: 128
- type: "null"
title: Client item ID
description: Optional opaque client identifier echoed back in results
type: object
required:
- camera
- start_time
- end_time
title: BatchExportItem
BatchExportResponse:
properties:
export_case_id:
anyOf:
- type: string
- type: "null"
title: Export Case Id
description: Export case ID associated with the batch
export_ids:
items:
type: string
type: array
title: Export Ids
description: Export IDs successfully queued
results:
items:
$ref: "#/components/schemas/BatchExportResultModel"
type: array
title: Results
description: Per-item batch export results
type: object
required:
- export_ids
- results
title: BatchExportResponse
description: Response model for starting an export batch.
BatchExportResultModel:
properties:
camera:
type: string
title: Camera
description: Camera name for this export attempt
export_id:
anyOf:
- type: string
- type: "null"
title: Export Id
description: The export ID when the export was successfully queued
success:
type: boolean
title: Success
description: Whether the export was successfully queued
status:
anyOf:
- type: string
- type: "null"
title: Status
description: Queue status for this camera export
error:
anyOf:
- type: string
- type: "null"
title: Error
description: Validation or queueing error for this item, if any
item_index:
anyOf:
- type: integer
- type: "null"
title: Item Index
description: Zero-based index of this result within the request items list
client_item_id:
anyOf:
- type: string
- type: "null"
title: Client Item Id
description: Opaque client-supplied item identifier echoed from the request
type: object
required:
- camera
- success
title: BatchExportResultModel
description: Per-item result for a batch export request.
EventsSubLabelBody: EventsSubLabelBody:
properties: properties:
subLabel: subLabel:
@ -6523,18 +6736,41 @@ components:
required: required:
- subLabel - subLabel
title: EventsSubLabelBody title: EventsSubLabelBody
ExportCaseAssignBody: ExportBulkDeleteBody:
properties: properties:
ids:
items:
type: string
minLength: 1
type: array
minItems: 1
title: Ids
type: object
required:
- ids
title: ExportBulkDeleteBody
description: Request body for bulk deleting exports.
ExportBulkReassignBody:
properties:
ids:
items:
type: string
minLength: 1
type: array
minItems: 1
title: Ids
export_case_id: export_case_id:
anyOf: anyOf:
- type: string - type: string
maxLength: 30 maxLength: 30
- type: "null" - type: "null"
title: Export Case Id title: Export Case Id
description: "Case ID to assign to the export, or null to unassign" description: "Case ID to assign to, or null to unassign from current case"
type: object type: object
title: ExportCaseAssignBody required:
description: Request body for assigning or unassigning an export to a case. - ids
title: ExportBulkReassignBody
description: Request body for bulk reassigning exports to a case.
ExportCaseCreateBody: ExportCaseCreateBody:
properties: properties:
name: name:

View File

@ -64,6 +64,7 @@ def require_admin_by_default():
"/logout", "/logout",
# Authenticated user endpoints (allow_any_authenticated) # Authenticated user endpoints (allow_any_authenticated)
"/profile", "/profile",
"/profiles",
# Public info endpoints (allow_public) # Public info endpoints (allow_public)
"/", "/",
"/version", "/version",
@ -87,7 +88,9 @@ def require_admin_by_default():
"/go2rtc/streams", "/go2rtc/streams",
"/event_ids", "/event_ids",
"/events", "/events",
"/cases",
"/exports", "/exports",
"/jobs/export",
} }
# Path prefixes that should be exempt (for paths with parameters) # Path prefixes that should be exempt (for paths with parameters)
@ -100,7 +103,9 @@ def require_admin_by_default():
"/go2rtc/streams/", # /go2rtc/streams/{camera} "/go2rtc/streams/", # /go2rtc/streams/{camera}
"/users/", # /users/{username}/password (has own auth) "/users/", # /users/{username}/password (has own auth)
"/preview/", # /preview/{file}/thumbnail.jpg "/preview/", # /preview/{file}/thumbnail.jpg
"/cases/", # /cases/{case_id}
"/exports/", # /exports/{export_id} "/exports/", # /exports/{export_id}
"/jobs/export/", # /jobs/export/{export_id}
"/vod/", # /vod/{camera_name}/... "/vod/", # /vod/{camera_name}/...
"/notifications/", # /notifications/pubkey, /notifications/register "/notifications/", # /notifications/pubkey, /notifications/register
) )

View File

@ -3,9 +3,11 @@
import base64 import base64
import json import json
import logging import logging
import operator
import time import time
from datetime import datetime from datetime import datetime
from typing import Any, Dict, Generator, List, Optional from functools import reduce
from typing import Any, Dict, List, Optional
import cv2 import cv2
from fastapi import APIRouter, Body, Depends, Request from fastapi import APIRouter, Body, Depends, Request
@ -17,6 +19,14 @@ from frigate.api.auth import (
get_allowed_cameras_for_filter, get_allowed_cameras_for_filter,
require_camera_access, require_camera_access,
) )
from frigate.api.chat_util import (
chunk_content,
distance_to_score,
format_events_with_local_time,
fuse_scores,
hydrate_event,
parse_iso_to_timestamp,
)
from frigate.api.defs.query.events_query_parameters import EventsQueryParams from frigate.api.defs.query.events_query_parameters import EventsQueryParams
from frigate.api.defs.request.chat_body import ChatCompletionRequest from frigate.api.defs.request.chat_body import ChatCompletionRequest
from frigate.api.defs.response.chat_response import ( from frigate.api.defs.response.chat_response import (
@ -32,55 +42,13 @@ from frigate.jobs.vlm_watch import (
start_vlm_watch_job, start_vlm_watch_job,
stop_vlm_watch_job, stop_vlm_watch_job,
) )
from frigate.models import Event
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.chat]) router = APIRouter(tags=[Tags.chat])
def _chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]:
"""Yield content in word-aware chunks for streaming."""
if not content:
return
words = content.split(" ")
current: List[str] = []
current_len = 0
for w in words:
current.append(w)
current_len += len(w) + 1
if current_len >= chunk_size:
yield " ".join(current) + " "
current = []
current_len = 0
if current:
yield " ".join(current)
def _format_events_with_local_time(
events_list: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Add human-readable local start/end times to each event for the LLM."""
result = []
for evt in events_list:
if not isinstance(evt, dict):
result.append(evt)
continue
copy_evt = dict(evt)
try:
start_ts = evt.get("start_time")
end_ts = evt.get("end_time")
if start_ts is not None:
dt_start = datetime.fromtimestamp(start_ts)
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p")
if end_ts is not None:
dt_end = datetime.fromtimestamp(end_ts)
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p")
except (TypeError, ValueError, OSError):
pass
result.append(copy_evt)
return result
class ToolExecuteRequest(BaseModel): class ToolExecuteRequest(BaseModel):
"""Request model for tool execution.""" """Request model for tool execution."""
@ -158,6 +126,76 @@ def get_tool_definitions() -> List[Dict[str, Any]]:
"required": [], "required": [],
}, },
}, },
{
"type": "function",
"function": {
"name": "find_similar_objects",
"description": (
"Find tracked objects that are visually and semantically similar "
"to a specific past event. Use this when the user references a "
"particular object they have seen and wants to find other "
"sightings of the same or similar one ('that green car', 'the "
"person in the red jacket', 'the package that was delivered'). "
"Prefer this over search_objects whenever the user's intent is "
"'find more like this specific one.' Use search_objects first "
"only if you need to locate the anchor event. Requires semantic "
"search to be enabled."
),
"parameters": {
"type": "object",
"properties": {
"event_id": {
"type": "string",
"description": "The id of the anchor event to find similar objects to.",
},
"after": {
"type": "string",
"description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').",
},
"before": {
"type": "string",
"description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').",
},
"cameras": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of cameras to restrict to. Defaults to all.",
},
"labels": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of labels to restrict to. Defaults to the anchor event's label.",
},
"sub_labels": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of sub_labels (names) to restrict to.",
},
"zones": {
"type": "array",
"items": {"type": "string"},
"description": "Optional list of zones. An event matches if any of its zones overlap.",
},
"similarity_mode": {
"type": "string",
"enum": ["visual", "semantic", "fused"],
"description": "Which similarity signal(s) to use. 'fused' (default) combines visual and semantic.",
"default": "fused",
},
"min_score": {
"type": "number",
"description": "Drop matches with a similarity score below this threshold (0.0-1.0).",
},
"limit": {
"type": "integer",
"description": "Maximum number of matches to return (default: 10).",
"default": 10,
},
},
"required": ["event_id"],
},
},
},
{ {
"type": "function", "type": "function",
"function": { "function": {
@ -434,6 +472,166 @@ async def _execute_search_objects(
) )
async def _execute_find_similar_objects(
    request: Request,
    arguments: Dict[str, Any],
    allowed_cameras: List[str],
) -> Dict[str, Any]:
    """Execute the find_similar_objects tool.

    Finds events similar to an anchor event by combining visual (thumbnail
    embedding) and/or semantic (description embedding) similarity, then
    intersecting the candidates with the structured filters supplied by the
    LLM (time range, cameras, labels, sub_labels, zones).

    Args:
        request: FastAPI request; supplies frigate_config and the embeddings
            context via request.app.
        arguments: Raw tool arguments from the LLM. "event_id" is required;
            all other keys are optional.
        allowed_cameras: Cameras the requesting user may access (RBAC).

    Returns a plain dict (not JSONResponse) so the chat loop can embed it
    directly in tool-result messages. Error responses carry "error" and
    "message"; success responses carry "anchor", "results",
    "similarity_mode", and "candidate_truncated".
    """
    # 1. Semantic search enabled?
    config = request.app.frigate_config
    if not getattr(config.semantic_search, "enabled", False):
        return {
            "error": "semantic_search_disabled",
            "message": (
                "Semantic search must be enabled to find similar objects. "
                "Enable it in the Frigate config under semantic_search."
            ),
        }

    context = request.app.embeddings
    if context is None:
        return {
            "error": "semantic_search_disabled",
            "message": "Embeddings context is not available.",
        }

    # 2. Anchor lookup.
    event_id = arguments.get("event_id")
    if not event_id:
        return {"error": "missing_event_id", "message": "event_id is required."}

    try:
        anchor = Event.get(Event.id == event_id)
    except Event.DoesNotExist:
        return {
            "error": "anchor_not_found",
            "message": f"Could not find event {event_id}.",
        }

    # 3. Parse params.
    after = parse_iso_to_timestamp(arguments.get("after"))
    before = parse_iso_to_timestamp(arguments.get("before"))

    cameras = arguments.get("cameras")
    if cameras:
        # Respect RBAC: intersect with the user's allowed cameras.
        cameras = [c for c in cameras if c in allowed_cameras]
    else:
        cameras = list(allowed_cameras) if allowed_cameras else None

    # Default to the anchor's label so "that green car" matches other cars.
    labels = arguments.get("labels") or [anchor.label]
    sub_labels = arguments.get("sub_labels")
    zones = arguments.get("zones")

    # Unknown modes silently fall back to the default rather than erroring.
    similarity_mode = arguments.get("similarity_mode", "fused")
    if similarity_mode not in ("visual", "semantic", "fused"):
        similarity_mode = "fused"

    min_score = arguments.get("min_score")
    # NOTE(review): int() raises ValueError/TypeError on a non-numeric
    # "limit" from the LLM — confirm the caller tolerates that, or clamp
    # defensively here.
    limit = int(arguments.get("limit", 10))
    limit = max(1, min(limit, 50))

    # 4. Run similarity searches. We deliberately do NOT pass event_ids into
    # the vec queries — the IN filter on sqlite-vec is broken in the installed
    # version (see frigate/embeddings/__init__.py). Mirror the pattern used by
    # frigate/api/event.py events_search: fetch top-k globally, then intersect
    # with the structured filters via Peewee.
    visual_distances: Dict[str, float] = {}
    description_distances: Dict[str, float] = {}
    try:
        if similarity_mode in ("visual", "fused"):
            rows = context.search_thumbnail(anchor)
            visual_distances = {row[0]: row[1] for row in rows}
        if similarity_mode in ("semantic", "fused"):
            # Prefer the anchor's generated description; fall back to its
            # sub_label, then label, so there is always some query text.
            query_text = (
                (anchor.data or {}).get("description")
                or anchor.sub_label
                or anchor.label
            )
            rows = context.search_description(query_text)
            description_distances = {row[0]: row[1] for row in rows}
    except Exception:
        logger.exception("Similarity search failed")
        return {
            "error": "similarity_search_failed",
            "message": "Failed to run similarity search.",
        }

    vec_ids = set(visual_distances) | set(description_distances)
    # The anchor trivially matches itself; exclude it from results.
    vec_ids.discard(anchor.id)

    # vec layer returns up to k=100 per modality; flag when we hit that ceiling
    # so the LLM can mention there may be more matches beyond what we saw.
    candidate_truncated = (
        len(visual_distances) >= 100 or len(description_distances) >= 100
    )

    if not vec_ids:
        return {
            "anchor": hydrate_event(anchor),
            "results": [],
            "similarity_mode": similarity_mode,
            "candidate_truncated": candidate_truncated,
        }

    # 5. Apply structured filters, intersected with vec hits.
    clauses = [Event.id.in_(list(vec_ids))]

    if after is not None:
        clauses.append(Event.start_time >= after)
    if before is not None:
        clauses.append(Event.start_time <= before)
    if cameras:
        clauses.append(Event.camera.in_(cameras))
    if labels:
        clauses.append(Event.label.in_(labels))
    if sub_labels:
        clauses.append(Event.sub_label.in_(sub_labels))
    if zones:
        # Mirror the pattern used by frigate/api/event.py for JSON-array zone match.
        zone_clauses = [Event.zones.cast("text") % f'*"{zone}"*' for zone in zones]
        clauses.append(reduce(operator.or_, zone_clauses))

    eligible = {e.id: e for e in Event.select().where(reduce(operator.and_, clauses))}

    # 6. Fuse and rank.
    scored: List[tuple[str, float]] = []
    for eid in eligible:
        # A candidate may appear in only one modality; fuse_scores handles
        # the missing side without penalty.
        v_score = (
            distance_to_score(visual_distances[eid], context.thumb_stats)
            if eid in visual_distances
            else None
        )
        d_score = (
            distance_to_score(description_distances[eid], context.desc_stats)
            if eid in description_distances
            else None
        )
        fused = fuse_scores(v_score, d_score)
        if fused is None:
            continue
        if min_score is not None and fused < min_score:
            continue
        scored.append((eid, fused))

    scored.sort(key=lambda pair: pair[1], reverse=True)
    scored = scored[:limit]

    results = [hydrate_event(eligible[eid], score=score) for eid, score in scored]

    return {
        "anchor": hydrate_event(anchor),
        "results": results,
        "similarity_mode": similarity_mode,
        "candidate_truncated": candidate_truncated,
    }
@router.post( @router.post(
"/chat/execute", "/chat/execute",
dependencies=[Depends(allow_any_authenticated())], dependencies=[Depends(allow_any_authenticated())],
@ -459,6 +657,13 @@ async def execute_tool(
if tool_name == "search_objects": if tool_name == "search_objects":
return await _execute_search_objects(arguments, allowed_cameras) return await _execute_search_objects(arguments, allowed_cameras)
if tool_name == "find_similar_objects":
result = await _execute_find_similar_objects(
request, arguments, allowed_cameras
)
status_code = 200 if "error" not in result else 400
return JSONResponse(content=result, status_code=status_code)
if tool_name == "set_camera_state": if tool_name == "set_camera_state":
result = await _execute_set_camera_state(request, arguments) result = await _execute_set_camera_state(request, arguments)
return JSONResponse( return JSONResponse(
@ -642,6 +847,8 @@ async def _execute_tool_internal(
except (json.JSONDecodeError, AttributeError) as e: except (json.JSONDecodeError, AttributeError) as e:
logger.warning(f"Failed to extract tool result: {e}") logger.warning(f"Failed to extract tool result: {e}")
return {"error": "Failed to parse tool result"} return {"error": "Failed to parse tool result"}
elif tool_name == "find_similar_objects":
return await _execute_find_similar_objects(request, arguments, allowed_cameras)
elif tool_name == "set_camera_state": elif tool_name == "set_camera_state":
return await _execute_set_camera_state(request, arguments) return await _execute_set_camera_state(request, arguments)
elif tool_name == "get_live_context": elif tool_name == "get_live_context":
@ -664,8 +871,9 @@ async def _execute_tool_internal(
return _execute_get_recap(arguments, allowed_cameras) return _execute_get_recap(arguments, allowed_cameras)
else: else:
logger.error( logger.error(
"Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context, " "Tool call failed: unknown tool %r. Expected one of: search_objects, find_similar_objects, "
"start_camera_watch, stop_camera_watch, get_profile_status, get_recap. Arguments received: %s", "get_live_context, start_camera_watch, stop_camera_watch, get_profile_status, get_recap. "
"Arguments received: %s",
tool_name, tool_name,
json.dumps(arguments), json.dumps(arguments),
) )
@ -927,7 +1135,7 @@ async def _execute_pending_tools(
json.dumps(tool_args), json.dumps(tool_args),
) )
if tool_name == "search_objects" and isinstance(tool_result, list): if tool_name == "search_objects" and isinstance(tool_result, list):
tool_result = _format_events_with_local_time(tool_result) tool_result = format_events_with_local_time(tool_result)
_keys = { _keys = {
"id", "id",
"camera", "camera",
@ -1080,7 +1288,9 @@ Do not start your response with phrases like "I will check...", "Let me see...",
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields. Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference. When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today). When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.{cameras_section}""" Always be accurate with time calculations based on the current date provided.
When a user refers to a specific object they have seen or describe with identifying details ("that green car", "the person in the red jacket", "a package left today"), prefer the find_similar_objects tool over search_objects. Use search_objects first only to locate the anchor event, then pass its id to find_similar_objects. For generic queries like "show me all cars today", keep using search_objects. If a user message begins with [attached_event:<id>], treat that event id as the anchor for any similarity or "tell me more" request in the same message and call find_similar_objects with that id.{cameras_section}"""
conversation.append( conversation.append(
{ {
@ -1118,6 +1328,9 @@ Always be accurate with time calculations based on the current date provided.{ca
async def stream_body_llm(): async def stream_body_llm():
nonlocal conversation, stream_tool_calls, stream_iterations nonlocal conversation, stream_tool_calls, stream_iterations
while stream_iterations < max_iterations: while stream_iterations < max_iterations:
if await request.is_disconnected():
logger.debug("Client disconnected, stopping chat stream")
return
logger.debug( logger.debug(
f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) " f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) "
f"with {len(conversation)} message(s)" f"with {len(conversation)} message(s)"
@ -1127,6 +1340,9 @@ Always be accurate with time calculations based on the current date provided.{ca
tools=tools if tools else None, tools=tools if tools else None,
tool_choice="auto", tool_choice="auto",
): ):
if await request.is_disconnected():
logger.debug("Client disconnected, stopping chat stream")
return
kind, value = event kind, value = event
if kind == "content_delta": if kind == "content_delta":
yield ( yield (
@ -1156,6 +1372,11 @@ Always be accurate with time calculations based on the current date provided.{ca
msg.get("content"), pending msg.get("content"), pending
) )
) )
if await request.is_disconnected():
logger.debug(
"Client disconnected before tool execution"
)
return
( (
executed_calls, executed_calls,
tool_results, tool_results,
@ -1240,7 +1461,7 @@ Always be accurate with time calculations based on the current date provided.{ca
+ b"\n" + b"\n"
) )
# Stream content in word-sized chunks for smooth UX # Stream content in word-sized chunks for smooth UX
for part in _chunk_content(final_content): for part in chunk_content(final_content):
yield ( yield (
json.dumps({"type": "content", "delta": part}).encode( json.dumps({"type": "content", "delta": part}).encode(
"utf-8" "utf-8"

135
frigate/api/chat_util.py Normal file
View File

@ -0,0 +1,135 @@
"""Pure, stateless helpers used by the chat tool dispatchers.
These were extracted from frigate/api/chat.py to keep that module focused on
route handlers, tool dispatchers, and streaming loop internals. Nothing in
this file touches the FastAPI request, the embeddings context, or the chat
loop state all inputs and outputs are plain data.
"""
import logging
import math
import time
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional
from frigate.embeddings.util import ZScoreNormalization
from frigate.models import Event
logger = logging.getLogger(__name__)
# Similarity fusion weights for find_similar_objects.
# Visual dominates because the feature's primary use case is "same specific object."
# If these change, update the test in test_chat_find_similar_objects.py.
VISUAL_WEIGHT = 0.65
DESCRIPTION_WEIGHT = 0.35
def chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]:
    """Split content into word-boundary chunks of roughly chunk_size characters.

    Every emitted chunk except possibly the last carries a trailing space, so
    the consumer can concatenate chunks verbatim to reconstruct the original
    text. Empty input yields nothing.
    """
    if not content:
        return
    buffer: List[str] = []
    buffered_chars = 0
    for word in content.split(" "):
        buffer.append(word)
        buffered_chars += len(word) + 1
        if buffered_chars < chunk_size:
            continue
        yield " ".join(buffer) + " "
        buffer = []
        buffered_chars = 0
    if buffer:
        yield " ".join(buffer)
def format_events_with_local_time(
    events_list: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Return copies of the event dicts enriched with local-time strings.

    For each dict entry, adds start_time_local / end_time_local (12-hour
    clock, server-local timezone) when the corresponding Unix timestamp is
    present. Non-dict entries pass through unchanged, malformed timestamps
    are ignored, and the input dicts are never mutated.
    """
    formatted: List[Dict[str, Any]] = []
    for event in events_list:
        if not isinstance(event, dict):
            formatted.append(event)
            continue
        enriched = dict(event)
        try:
            for src_key, dest_key in (
                ("start_time", "start_time_local"),
                ("end_time", "end_time_local"),
            ):
                ts = event.get(src_key)
                if ts is not None:
                    enriched[dest_key] = datetime.fromtimestamp(ts).strftime(
                        "%Y-%m-%d %I:%M:%S %p"
                    )
        except (TypeError, ValueError, OSError):
            # Best-effort enrichment: keep whatever was added before the
            # failure and return the event as-is otherwise.
            pass
        formatted.append(enriched)
    return formatted
def distance_to_score(distance: float, stats: "ZScoreNormalization") -> float:
    """Convert a cosine distance to a [0, 1] similarity score.

    Uses the existing ZScoreNormalization stats maintained by EmbeddingsContext
    to normalize across deployments, then a bounded sigmoid. Lower distance ->
    higher score. If stats are uninitialized (stddev == 0), returns a neutral
    0.5 so the fallback ordering by raw distance still dominates.

    Args:
        distance: Raw cosine distance (smaller means more similar).
        stats: Object exposing the running mean/stddev of observed distances.

    Returns:
        Similarity score in [0, 1]; exactly 0.5 when stats are unusable.
    """
    if stats.stddev == 0:
        return 0.5
    z = (distance - stats.mean) / stats.stddev
    # Clamp before exponentiating: math.exp overflows (OverflowError) for
    # arguments above ~709, and |z| >= 60 already saturates the sigmoid to
    # 0.0/1.0 within double precision, so clamping cannot change a ranking.
    z = max(-60.0, min(60.0, z))
    # Sigmoid on -z so that small distance (good) -> high score.
    return 1.0 / (1.0 + math.exp(z))
def fuse_scores(
    visual_score: Optional[float],
    description_score: Optional[float],
) -> Optional[float]:
    """Combine visual and description similarity into a single score.

    Returns the weighted average when both modalities produced a score, the
    lone score (with no penalty) when only one side is present, and None when
    neither is — the caller should drop such events.
    """
    present = [s for s in (visual_score, description_score) if s is not None]
    if not present:
        return None
    if len(present) == 1:
        return present[0]
    return VISUAL_WEIGHT * visual_score + DESCRIPTION_WEIGHT * description_score
def parse_iso_to_timestamp(value: Optional[str]) -> Optional[float]:
    """Parse an ISO-8601 string as server-local time -> unix timestamp.

    Mirrors the parsing _execute_search_objects uses so both tools accept the
    same format from the LLM. A trailing 'Z' is dropped (the value is still
    interpreted as local time) and precision beyond whole seconds is ignored.
    Returns None for None input or any unparseable value.
    """
    if value is None:
        return None
    try:
        normalized = value.replace("Z", "").strip()[:19]
        parsed = datetime.strptime(normalized, "%Y-%m-%dT%H:%M:%S")
        return time.mktime(parsed.timetuple())
    except (ValueError, AttributeError, TypeError):
        logger.warning("Invalid timestamp format: %s", value)
        return None
def hydrate_event(event: Event, score: Optional[float] = None) -> Dict[str, Any]:
    """Convert an Event row into the dict shape returned by find_similar_objects."""
    hydrated: Dict[str, Any] = {
        field: getattr(event, field)
        for field in (
            "id",
            "camera",
            "label",
            "sub_label",
            "start_time",
            "end_time",
            "zones",
        )
    }
    # "score" is only attached for ranked results, not the anchor itself.
    if score is not None:
        hydrated["score"] = score
    return hydrated

View File

@ -0,0 +1,65 @@
from typing import List, Optional
from pydantic import BaseModel, Field, model_validator
MAX_BATCH_EXPORT_ITEMS = 50
class BatchExportItem(BaseModel):
    """One export request within a batch: a single camera and time range."""

    camera: str = Field(title="Camera name")
    # Unix timestamps; end_time must be strictly after start_time (enforced
    # by the validator on the enclosing BatchExportBody).
    start_time: float = Field(title="Start time")
    end_time: float = Field(title="End time")
    image_path: Optional[str] = Field(
        default=None,
        title="Existing thumbnail path",
        description="Optional existing image to use as the export thumbnail",
    )
    friendly_name: Optional[str] = Field(
        default=None,
        title="Friendly name",
        max_length=256,
        description="Optional friendly name for this specific export item",
    )
    client_item_id: Optional[str] = Field(
        default=None,
        title="Client item ID",
        max_length=128,
        description="Optional opaque client identifier echoed back in results",
    )
class BatchExportBody(BaseModel):
    """Request body for starting a batch of exports in one call.

    A batch may optionally be attached to an existing export case
    (export_case_id) or create a new one (new_case_name/new_case_description).
    """

    items: List[BatchExportItem] = Field(
        title="Items",
        min_length=1,
        max_length=MAX_BATCH_EXPORT_ITEMS,
        description="List of export items. Each item has its own camera and time range.",
    )
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description=(
            "Existing export case ID to assign all exports to. Attaching to an "
            "existing case is temporarily admin-only until case-level ACLs exist."
        ),
    )
    new_case_name: Optional[str] = Field(
        default=None,
        title="New case name",
        max_length=100,
        description="Name of a new export case to create when export_case_id is omitted",
    )
    new_case_description: Optional[str] = Field(
        default=None,
        title="New case description",
        description="Optional description for a newly created export case",
    )

    @model_validator(mode="after")
    def validate_case_target(self) -> "BatchExportBody":
        # NOTE(review): despite the name, this only validates per-item time
        # ranges; it does not check export_case_id/new_case_name exclusivity —
        # confirm whether case-target validation was intended here.
        for item in self.items:
            if item.end_time <= item.start_time:
                raise ValueError("end_time must be after start_time")
        return self

View File

@ -0,0 +1,24 @@
"""Request bodies for bulk export operations."""
from typing import Optional
from pydantic import BaseModel, Field, conlist, constr
class ExportBulkDeleteBody(BaseModel):
    """Request body for bulk deleting exports."""

    # List of export IDs with at least one element and each element with at least one char
    ids: conlist(constr(min_length=1), min_length=1)
class ExportBulkReassignBody(BaseModel):
    """Request body for bulk reassigning exports to a case."""

    # List of export IDs with at least one element and each element with at least one char
    ids: conlist(constr(min_length=1), min_length=1)
    # None explicitly unassigns the exports from whatever case they are in.
    export_case_id: Optional[str] = Field(
        default=None,
        max_length=30,
        description="Case ID to assign to, or null to unassign from current case",
    )

View File

@ -23,13 +23,3 @@ class ExportCaseUpdateBody(BaseModel):
description: Optional[str] = Field( description: Optional[str] = Field(
default=None, description="Updated description of the export case" default=None, description="Updated description of the export case"
) )
class ExportCaseAssignBody(BaseModel):
"""Request body for assigning or unassigning an export to a case."""
export_case_id: Optional[str] = Field(
default=None,
max_length=30,
description="Case ID to assign to the export, or null to unassign",
)

View File

@ -1,4 +1,4 @@
from typing import List, Optional from typing import Any, List, Optional
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
@ -28,6 +28,88 @@ class StartExportResponse(BaseModel):
export_id: Optional[str] = Field( export_id: Optional[str] = Field(
default=None, description="The export ID if successfully started" default=None, description="The export ID if successfully started"
) )
status: Optional[str] = Field(
default=None,
description="Queue status for the export job",
)
class BatchExportResultModel(BaseModel):
    """Per-item result for a batch export request.

    Mirrors one BatchExportItem from the request; item_index/client_item_id
    let the client correlate results with the items it submitted.
    """

    camera: str = Field(description="Camera name for this export attempt")
    export_id: Optional[str] = Field(
        default=None,
        description="The export ID when the export was successfully queued",
    )
    success: bool = Field(description="Whether the export was successfully queued")
    status: Optional[str] = Field(
        default=None,
        description="Queue status for this camera export",
    )
    error: Optional[str] = Field(
        default=None,
        description="Validation or queueing error for this item, if any",
    )
    item_index: Optional[int] = Field(
        default=None,
        description="Zero-based index of this result within the request items list",
    )
    client_item_id: Optional[str] = Field(
        default=None,
        description="Opaque client-supplied item identifier echoed from the request",
    )
class BatchExportResponse(BaseModel):
    """Response model for starting an export batch.

    export_ids lists only the successfully queued exports; results contains
    one entry per requested item, including failures.
    """

    export_case_id: Optional[str] = Field(
        default=None,
        description="Export case ID associated with the batch",
    )
    export_ids: List[str] = Field(description="Export IDs successfully queued")
    results: List[BatchExportResultModel] = Field(
        description="Per-item batch export results"
    )
class ExportJobModel(BaseModel):
    """Model representing a queued or running export job."""

    id: str = Field(description="Unique identifier for the export job")
    job_type: str = Field(description="Job type")
    status: str = Field(description="Current job status")
    camera: str = Field(description="Camera associated with this export job")
    name: Optional[str] = Field(
        default=None,
        description="Friendly name for the export",
    )
    export_case_id: Optional[str] = Field(
        default=None,
        description="ID of the export case this export belongs to",
    )
    # request_* describe the recording window the user asked for;
    # start_time/end_time below describe the job's own execution timeline.
    request_start_time: float = Field(description="Requested export start time")
    request_end_time: float = Field(description="Requested export end time")
    start_time: Optional[float] = Field(
        default=None,
        description="Unix timestamp when execution started",
    )
    end_time: Optional[float] = Field(
        default=None,
        description="Unix timestamp when execution completed",
    )
    error_message: Optional[str] = Field(
        default=None,
        description="Error message for failed jobs",
    )
    results: Optional[dict[str, Any]] = Field(
        default=None,
        description="Result metadata for completed jobs",
    )


# API convenience alias: a list of export jobs as returned by the jobs endpoint.
ExportJobsResponse = List[ExportJobModel]
ExportsResponse = List[ExportModel] ExportsResponse = List[ExportModel]

File diff suppressed because it is too large Load Diff

View File

@ -52,6 +52,7 @@ from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
from frigate.events.audio import AudioProcessor from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup from frigate.events.cleanup import EventCleanup
from frigate.events.maintainer import EventProcessor from frigate.events.maintainer import EventProcessor
from frigate.jobs.export import reap_stale_exports
from frigate.jobs.motion_search import stop_all_motion_search_jobs from frigate.jobs.motion_search import stop_all_motion_search_jobs
from frigate.log import _stop_logging from frigate.log import _stop_logging
from frigate.models import ( from frigate.models import (
@ -611,6 +612,11 @@ class FrigateApp:
# Clean up any stale replay camera artifacts (filesystem + DB) # Clean up any stale replay camera artifacts (filesystem + DB)
cleanup_replay_cameras() cleanup_replay_cameras()
# Reap any Export rows still marked in_progress from a previous
# session (crash, kill, broken migration). Runs synchronously before
# uvicorn binds so no API request can observe a stale row.
reap_stale_exports()
self.init_inter_process_communicator() self.init_inter_process_communicator()
self.start_detectors() self.start_detectors()
self.init_dispatcher() self.init_dispatcher()

View File

@ -92,6 +92,12 @@ class RecordExportConfig(FrigateBaseModel):
title="Export hwaccel args", title="Export hwaccel args",
description="Hardware acceleration args to use for export/transcode operations.", description="Hardware acceleration args to use for export/transcode operations.",
) )
max_concurrent: int = Field(
default=3,
ge=1,
title="Maximum concurrent exports",
description="Maximum number of export jobs to process at the same time.",
)
class RecordConfig(FrigateBaseModel): class RecordConfig(FrigateBaseModel):

View File

@ -1,8 +1,12 @@
"""Local only processors for handling real time object processing.""" """Local only processors for handling real time object processing."""
import logging import logging
import threading
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Any from collections import deque
from concurrent.futures import Future
from queue import Empty, Full, Queue
from typing import Any, Callable
import numpy as np import numpy as np
@ -74,3 +78,123 @@ class RealTimeProcessorApi(ABC):
payload: The updated configuration object. payload: The updated configuration object.
""" """
pass pass
    def drain_results(self) -> list[dict[str, Any]]:
        """Return pending results that need IPC side-effects.

        Deferred processors accumulate results on a worker thread.
        The maintainer calls this each loop iteration to collect them
        and perform publishes on the main thread.

        Synchronous processors return an empty list (default).

        Returns:
            A list of result payload dicts ready to be published; empty
            for processors that do all their work inline.
        """
        return []
    def shutdown(self) -> None:
        """Stop any background work and release resources.

        Called when the processor is being removed or the maintainer
        is shutting down. Default is a no-op for synchronous processors.
        Deferred subclasses override this to stop their worker thread.
        """
        pass
class DeferredRealtimeProcessorApi(RealTimeProcessorApi):
    """Base class for processors that offload heavy work to a background thread.

    Subclasses implement:
    - process_frame(): do cheap gating + crop + copy, then call _enqueue_task()
    - _process_task(task): heavy work (inference, consensus) on the worker thread
    - handle_request(): optionally use _enqueue_request() for sync request/response
    - expire_object(): call _enqueue_task() with a control message

    The worker thread owns all processor state. No locks are needed because
    only the worker mutates state. Results that need IPC are placed in
    _pending_results via _emit_result(), and the maintainer drains them
    each loop iteration.
    """

    def __init__(
        self,
        config: FrigateConfig,
        metrics: DataProcessorMetrics,
        max_queue: int = 8,
    ) -> None:
        super().__init__(config, metrics)
        # Bounded queue: when the worker falls behind, _enqueue_task drops new
        # work instead of blocking the detection pipeline.
        self._task_queue: Queue = Queue(maxsize=max_queue)
        # Results staged by the worker for the maintainer; guarded by
        # _results_lock since both threads touch it.
        self._pending_results: deque[dict[str, Any]] = deque()
        self._results_lock = threading.Lock()
        self._stop_event = threading.Event()
        # Daemon thread so a wedged worker cannot block interpreter exit.
        self._worker = threading.Thread(
            target=self._drain_loop,
            daemon=True,
            name=f"{type(self).__name__}_worker",
        )
        self._worker.start()

    def _drain_loop(self) -> None:
        """Worker thread main loop — drains the task queue until stopped."""
        while not self._stop_event.is_set():
            # Short timeout so the stop event is noticed promptly.
            try:
                task = self._task_queue.get(timeout=0.5)
            except Empty:
                continue

            # A ((func, args), Future) tuple is the request/response envelope
            # produced by _enqueue_request; anything else is a fire-and-forget
            # task for _process_task.
            if (
                isinstance(task, tuple)
                and len(task) == 2
                and isinstance(task[1], Future)
            ):
                # Request/response: (callable_and_args, future)
                (func, args), future = task
                try:
                    result = func(args)
                    future.set_result(result)
                except Exception as e:
                    future.set_exception(e)
            else:
                # Exceptions are logged and swallowed so one bad task cannot
                # kill the worker thread.
                try:
                    self._process_task(task)
                except Exception:
                    logger.exception("Error processing deferred task")

    def _enqueue_task(self, task: Any) -> bool:
        """Enqueue a task for the worker. Returns False if queue is full (dropped)."""
        try:
            self._task_queue.put_nowait(task)
            return True
        except Full:
            logger.debug("Deferred processor queue full, dropping task")
            return False

    def _enqueue_request(self, func: Callable, args: Any, timeout: float = 10.0) -> Any:
        """Enqueue a request and block until the worker returns a result.

        Raises queue.Full if the queue stays full past the timeout, and
        concurrent.futures.TimeoutError if the worker does not answer in time.

        NOTE(review): the timeout applies independently to the queue put and
        to the future wait, so worst-case wall time is ~2x `timeout` —
        confirm callers tolerate that.
        """
        future: Future = Future()
        self._task_queue.put(((func, args), future), timeout=timeout)
        return future.result(timeout=timeout)

    def _emit_result(self, result: dict[str, Any]) -> None:
        """Called by the worker thread to stage a result for the maintainer."""
        with self._results_lock:
            self._pending_results.append(result)

    def drain_results(self) -> list[dict[str, Any]]:
        """Called by the maintainer on the main thread to collect pending results."""
        with self._results_lock:
            results = list(self._pending_results)
            self._pending_results.clear()
        return results

    def shutdown(self) -> None:
        """Signal the worker to stop and wait for it to finish.

        Bounded join: a worker stuck in _process_task is abandoned after 5s
        (safe because the thread is a daemon).
        """
        self._stop_event.set()
        self._worker.join(timeout=5.0)

    @abstractmethod
    def _process_task(self, task: Any) -> None:
        """Process a single task on the worker thread.

        Subclasses implement inference, consensus, training image saves here.
        Call _emit_result() to stage results for the maintainer to publish.
        """
        pass

View File

@ -1,7 +1,6 @@
"""Real time processor that works with classification tflite models.""" """Real time processor that works with classification tflite models."""
import datetime import datetime
import json
import logging import logging
import os import os
from typing import Any from typing import Any
@ -10,25 +9,18 @@ import cv2
import numpy as np import numpy as np
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.event_metadata_updater import ( from frigate.comms.event_metadata_updater import EventMetadataPublisher
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.classification import ( from frigate.config.classification import CustomClassificationConfig
CustomClassificationConfig,
ObjectClassificationType,
)
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
from frigate.log import suppress_stderr_during from frigate.log import suppress_stderr_during
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
from frigate.util.image import calculate_region from frigate.util.image import calculate_region
from frigate.util.object import box_overlaps from frigate.util.object import box_overlaps
from ..types import DataProcessorMetrics from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi from .api import DeferredRealtimeProcessorApi
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
@ -40,7 +32,7 @@ logger = logging.getLogger(__name__)
MAX_OBJECT_CLASSIFICATIONS = 16 MAX_OBJECT_CLASSIFICATIONS = 16
class CustomStateClassificationProcessor(RealTimeProcessorApi): class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
@ -48,7 +40,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
requestor: InterProcessRequestor, requestor: InterProcessRequestor,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
): ):
super().__init__(config, metrics) super().__init__(config, metrics, max_queue=4)
self.model_config = model_config self.model_config = model_config
if not self.model_config.name: if not self.model_config.name:
@ -259,14 +251,34 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
) )
return return
frame = rgb[y1:y2, x1:x2] cropped_frame = rgb[y1:y2, x1:x2]
try: try:
resized_frame = cv2.resize(frame, (224, 224)) resized_frame = cv2.resize(cropped_frame, (224, 224))
except Exception: except Exception:
logger.warning("Failed to resize image for state classification") logger.warning("Failed to resize image for state classification")
return return
# Copy for training image saves on worker thread
crop_bgr = cv2.cvtColor(cropped_frame, cv2.COLOR_RGB2BGR)
self._enqueue_task(("classify", camera, now, resized_frame, crop_bgr))
def _process_task(self, task: Any) -> None:
kind = task[0]
if kind == "classify":
_, camera, timestamp, resized_frame, crop_bgr = task
self._classify_state(camera, timestamp, resized_frame, crop_bgr)
elif kind == "reload":
self.__build_detector()
def _classify_state(
self,
camera: str,
timestamp: float,
resized_frame: np.ndarray,
crop_bgr: np.ndarray,
) -> None:
if self.interpreter is None: if self.interpreter is None:
# When interpreter is None, always save (score is 0.0, which is < 1.0) # When interpreter is None, always save (score is 0.0, which is < 1.0)
if self._should_save_image(camera, "unknown", 0.0): if self._should_save_image(camera, "unknown", 0.0):
@ -277,15 +289,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), crop_bgr,
"none-none", "none-none",
now, timestamp,
"unknown", "unknown",
0.0, 0.0,
max_files=save_attempts, max_files=save_attempts,
) )
return return
if not self.tensor_input_details or not self.tensor_output_details:
return
input = np.expand_dims(resized_frame, axis=0) input = np.expand_dims(resized_frame, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke() self.interpreter.invoke()
@ -298,7 +313,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
) )
best_id = int(np.argmax(probs)) best_id = int(np.argmax(probs))
score = round(probs[best_id], 2) score = round(probs[best_id], 2)
self.__update_metrics(datetime.datetime.now().timestamp() - now) self.__update_metrics(datetime.datetime.now().timestamp() - timestamp)
detected_state = self.labelmap[best_id] detected_state = self.labelmap[best_id]
@ -310,9 +325,9 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), crop_bgr,
"none-none", "none-none",
now, timestamp,
detected_state, detected_state,
score, score,
max_files=save_attempts, max_files=save_attempts,
@ -327,9 +342,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
verified_state = self.verify_state_change(camera, detected_state) verified_state = self.verify_state_change(camera, detected_state)
if verified_state is not None: if verified_state is not None:
self.requestor.send_data( self._emit_result(
f"{camera}/classification/{self.model_config.name}", {
verified_state, "type": "classification",
"processor": "state",
"model_name": self.model_config.name,
"camera": camera,
"state": verified_state,
}
) )
def handle_request( def handle_request(
@ -337,14 +357,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
) -> dict[str, Any] | None: ) -> dict[str, Any] | None:
if topic == EmbeddingsRequestEnum.reload_classification_model.value: if topic == EmbeddingsRequestEnum.reload_classification_model.value:
if request_data.get("model_name") == self.model_config.name: if request_data.get("model_name") == self.model_config.name:
self.__build_detector()
logger.info( def _do_reload(data: dict[str, Any]) -> dict[str, Any]:
f"Successfully loaded updated model for {self.model_config.name}" self.__build_detector()
) logger.info(
return { f"Successfully loaded updated model for {self.model_config.name}"
"success": True, )
"message": f"Loaded {self.model_config.name} model.", return {
} "success": True,
"message": f"Loaded {self.model_config.name} model.",
}
result: dict[str, Any] = self._enqueue_request(_do_reload, request_data)
return result
else: else:
return None return None
else: else:
@ -354,7 +379,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
pass pass
class CustomObjectClassificationProcessor(RealTimeProcessorApi): class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
@ -363,7 +388,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
requestor: InterProcessRequestor, requestor: InterProcessRequestor,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
): ):
super().__init__(config, metrics) super().__init__(config, metrics, max_queue=8)
self.model_config = model_config self.model_config = model_config
if not self.model_config.name: if not self.model_config.name:
@ -536,18 +561,41 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
) )
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
crop = rgb[ crop = rgb[y:y2, x:x2]
y:y2,
x:x2,
]
if crop.shape != (224, 224): try:
try: resized_crop = cv2.resize(crop, (224, 224))
resized_crop = cv2.resize(crop, (224, 224)) except Exception:
except Exception: logger.warning("Failed to resize image for object classification")
logger.warning("Failed to resize image for state classification") return
return
# Copy crop for training images (will be used on worker thread)
crop_bgr = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR)
self._enqueue_task(
("classify", object_id, obj_data["camera"], now, resized_crop, crop_bgr)
)
def _process_task(self, task: Any) -> None:
kind = task[0]
if kind == "classify":
_, object_id, camera, timestamp, resized_crop, crop_bgr = task
self._classify_object(object_id, camera, timestamp, resized_crop, crop_bgr)
elif kind == "expire":
_, object_id = task
if object_id in self.classification_history:
self.classification_history.pop(object_id)
elif kind == "reload":
self.__build_detector()
def _classify_object(
self,
object_id: str,
camera: str,
timestamp: float,
resized_crop: np.ndarray,
crop_bgr: np.ndarray,
) -> None:
if self.interpreter is None: if self.interpreter is None:
save_attempts = ( save_attempts = (
self.model_config.save_attempts self.model_config.save_attempts
@ -556,9 +604,9 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), crop_bgr,
object_id, object_id,
now, timestamp,
"unknown", "unknown",
0.0, 0.0,
max_files=save_attempts, max_files=save_attempts,
@ -569,7 +617,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
if object_id not in self.classification_history: if object_id not in self.classification_history:
self.classification_history[object_id] = [] self.classification_history[object_id] = []
self.classification_history[object_id].append(("unknown", 0.0, now)) self.classification_history[object_id].append(("unknown", 0.0, timestamp))
return
if not self.tensor_input_details or not self.tensor_output_details:
return return
input = np.expand_dims(resized_crop, axis=0) input = np.expand_dims(resized_crop, axis=0)
@ -584,7 +635,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
) )
best_id = int(np.argmax(probs)) best_id = int(np.argmax(probs))
score = round(probs[best_id], 2) score = round(probs[best_id], 2)
self.__update_metrics(datetime.datetime.now().timestamp() - now) self.__update_metrics(datetime.datetime.now().timestamp() - timestamp)
save_attempts = ( save_attempts = (
self.model_config.save_attempts self.model_config.save_attempts
@ -593,9 +644,9 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
) )
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), crop_bgr,
object_id, object_id,
now, timestamp,
self.labelmap[best_id], self.labelmap[best_id],
score, score,
max_files=save_attempts, max_files=save_attempts,
@ -610,92 +661,57 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
sub_label = self.labelmap[best_id] sub_label = self.labelmap[best_id]
logger.debug( logger.debug(
f"{self.model_config.name}: Object {object_id} (label={obj_data['label']}) passed threshold with sub_label={sub_label}, score={score}" f"{self.model_config.name}: Object {object_id} passed threshold with sub_label={sub_label}, score={score}"
) )
consensus_label, consensus_score = self.get_weighted_score( consensus_label, consensus_score = self.get_weighted_score(
object_id, sub_label, score, now object_id, sub_label, score, timestamp
) )
logger.debug( logger.debug(
f"{self.model_config.name}: get_weighted_score returned consensus_label={consensus_label}, consensus_score={consensus_score} for {object_id}" f"{self.model_config.name}: get_weighted_score returned consensus_label={consensus_label}, consensus_score={consensus_score} for {object_id}"
) )
if consensus_label is not None: if consensus_label is not None and self.model_config.object_config is not None:
camera = obj_data["camera"] self._emit_result(
logger.debug( {
f"{self.model_config.name}: Publishing sub_label={consensus_label} for {obj_data['label']} object {object_id} on {camera}" "type": "classification",
"processor": "object",
"model_name": self.model_config.name,
"classification_type": self.model_config.object_config.classification_type,
"object_id": object_id,
"camera": camera,
"timestamp": timestamp,
"label": consensus_label,
"score": consensus_score,
}
) )
if ( def handle_request(
self.model_config.object_config.classification_type self, topic: str, request_data: dict[str, Any]
== ObjectClassificationType.sub_label ) -> dict[str, Any] | None:
):
self.sub_label_publisher.publish(
(object_id, consensus_label, consensus_score),
EventMetadataTypeEnum.sub_label,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": now,
"model": self.model_config.name,
"sub_label": consensus_label,
"score": consensus_score,
}
),
)
elif (
self.model_config.object_config.classification_type
== ObjectClassificationType.attribute
):
self.sub_label_publisher.publish(
(
object_id,
self.model_config.name,
consensus_label,
consensus_score,
),
EventMetadataTypeEnum.attribute.value,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": now,
"model": self.model_config.name,
"attribute": consensus_label,
"score": consensus_score,
}
),
)
def handle_request(self, topic: str, request_data: dict) -> dict | None:
if topic == EmbeddingsRequestEnum.reload_classification_model.value: if topic == EmbeddingsRequestEnum.reload_classification_model.value:
if request_data.get("model_name") == self.model_config.name: if request_data.get("model_name") == self.model_config.name:
self.__build_detector()
logger.info( def _do_reload(data: dict[str, Any]) -> dict[str, Any]:
f"Successfully loaded updated model for {self.model_config.name}" self.__build_detector()
) logger.info(
return { f"Successfully loaded updated model for {self.model_config.name}"
"success": True, )
"message": f"Loaded {self.model_config.name} model.", return {
} "success": True,
"message": f"Loaded {self.model_config.name} model.",
}
result: dict[str, Any] = self._enqueue_request(_do_reload, request_data)
return result
else: else:
return None return None
else: else:
return None return None
def expire_object(self, object_id: str, camera: str) -> None: def expire_object(self, object_id: str, camera: str) -> None:
if object_id in self.classification_history: self._enqueue_task(("expire", object_id))
self.classification_history.pop(object_id)
def write_classification_attempt( def write_classification_attempt(

View File

@ -2,6 +2,7 @@
import base64 import base64
import datetime import datetime
import json
import logging import logging
import threading import threading
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
@ -33,6 +34,7 @@ from frigate.config.camera.updater import (
CameraConfigUpdateEnum, CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber, CameraConfigUpdateSubscriber,
) )
from frigate.config.classification import ObjectClassificationType
from frigate.data_processing.common.license_plate.model import ( from frigate.data_processing.common.license_plate.model import (
LicensePlateModelRunner, LicensePlateModelRunner,
) )
@ -61,6 +63,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import GenAIClientManager from frigate.genai import GenAIClientManager
from frigate.models import Event, Recordings, ReviewSegment, Trigger from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize from frigate.util.builtin import serialize
from frigate.util.file import get_event_thumbnail_bytes from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import SharedMemoryFrameManager from frigate.util.image import SharedMemoryFrameManager
@ -274,10 +277,15 @@ class EmbeddingMaintainer(threading.Thread):
self._process_recordings_updates() self._process_recordings_updates()
self._process_review_updates() self._process_review_updates()
self._process_frame_updates() self._process_frame_updates()
self._process_deferred_results()
self._expire_dedicated_lpr() self._expire_dedicated_lpr()
self._process_finalized() self._process_finalized()
self._process_event_metadata() self._process_event_metadata()
# Shutdown deferred processors
for processor in self.realtime_processors:
processor.shutdown()
self.config_updater.stop() self.config_updater.stop()
self.enrichment_config_subscriber.stop() self.enrichment_config_subscriber.stop()
self.event_subscriber.stop() self.event_subscriber.stop()
@ -316,10 +324,9 @@ class EmbeddingMaintainer(threading.Thread):
model_name = topic.split("/")[-1] model_name = topic.split("/")[-1]
if model_config is None: if model_config is None:
self.realtime_processors = [ remaining = []
processor for processor in self.realtime_processors:
for processor in self.realtime_processors if (
if not (
isinstance( isinstance(
processor, processor,
( (
@ -328,8 +335,11 @@ class EmbeddingMaintainer(threading.Thread):
), ),
) )
and processor.model_config.name == model_name and processor.model_config.name == model_name
) ):
] processor.shutdown()
else:
remaining.append(processor)
self.realtime_processors = remaining
logger.info( logger.info(
f"Successfully removed classification processor for model: {model_name}" f"Successfully removed classification processor for model: {model_name}"
@ -697,6 +707,68 @@ class EmbeddingMaintainer(threading.Thread):
self.frame_manager.close(frame_name) self.frame_manager.close(frame_name)
def _process_deferred_results(self) -> None:
"""Drain results from deferred processors and perform IPC side-effects."""
for processor in self.realtime_processors:
results = processor.drain_results()
for result in results:
if result.get("type") != "classification":
continue
if result["processor"] == "state":
self.requestor.send_data(
f"{result['camera']}/classification/{result['model_name']}",
result["state"],
)
elif result["processor"] == "object":
object_id = result["object_id"]
camera = result["camera"]
timestamp = result["timestamp"]
model_name = result["model_name"]
label = result["label"]
score = result["score"]
classification_type = result["classification_type"]
if classification_type == ObjectClassificationType.sub_label:
self.event_metadata_publisher.publish(
(object_id, label, score),
EventMetadataTypeEnum.sub_label,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": timestamp,
"model": model_name,
"sub_label": label,
"score": score,
}
),
)
elif classification_type == ObjectClassificationType.attribute:
self.event_metadata_publisher.publish(
(object_id, model_name, label, score),
EventMetadataTypeEnum.attribute.value,
)
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.classification,
"id": object_id,
"camera": camera,
"timestamp": timestamp,
"model": model_name,
"attribute": label,
"score": score,
}
),
)
def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None: def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
"""Embed the thumbnail for an event.""" """Embed the thumbnail for an event."""
if not self.config.semantic_search.enabled: if not self.config.semantic_search.enabled:

387
frigate/jobs/export.py Normal file
View File

@ -0,0 +1,387 @@
"""Export job management with queued background execution."""
import logging
import os
import threading
import time
from dataclasses import dataclass
from pathlib import Path
from queue import Full, Queue
from typing import Any, Optional
from peewee import DoesNotExist
from frigate.config import FrigateConfig
from frigate.jobs.job import Job
from frigate.models import Export
from frigate.record.export import PlaybackSourceEnum, RecordingExporter
from frigate.types import JobStatusTypesEnum
logger = logging.getLogger(__name__)

# Maximum number of jobs that can sit in the queue waiting to run.
# Prevents a runaway client from unbounded memory growth; enqueue()
# raises ExportQueueFullError once this many jobs are pending.
MAX_QUEUED_EXPORT_JOBS = 100
class ExportQueueFullError(RuntimeError):
    """Signals that no additional export jobs can be queued right now.

    Raised when the bounded export queue is at capacity; callers should
    treat this as a retryable condition once current exports finish.
    """
@dataclass
class ExportJob(Job):
    """Job state for export operations.

    Extends the generic Job record with the requested camera/time range
    plus the internal execution knobs (image path, ffmpeg overrides, CPU
    fallback) needed to drive the recording exporter.
    """

    job_type: str = "export"
    camera: str = ""
    name: Optional[str] = None
    image_path: Optional[str] = None
    export_case_id: Optional[str] = None
    request_start_time: float = 0.0
    request_end_time: float = 0.0
    playback_source: str = PlaybackSourceEnum.recordings.value
    ffmpeg_input_args: Optional[str] = None
    ffmpeg_output_args: Optional[str] = None
    cpu_fallback: bool = False

    def to_dict(self) -> dict[str, Any]:
        """Serialize for API responses.

        Only the fields belonging to the public ExportJobModel schema are
        exposed. Internal execution details (image_path, ffmpeg args,
        cpu_fallback) are deliberately left out so they never leak through
        the API.
        """
        exposed = (
            "id",
            "job_type",
            "status",
            "camera",
            "name",
            "export_case_id",
            "request_start_time",
            "request_end_time",
            "start_time",
            "end_time",
            "error_message",
            "results",
        )
        return {field_name: getattr(self, field_name) for field_name in exposed}
class ExportQueueWorker(threading.Thread):
    """Daemon thread that drains the manager's queue and executes exports.

    Each worker blocks on the shared queue, runs one job at a time via the
    manager, and always marks the queue item done so joiners make progress
    even when a job raises.
    """

    def __init__(self, manager: "ExportJobManager", worker_index: int) -> None:
        super().__init__(
            daemon=True,
            name=f"export_queue_worker_{worker_index}",
        )
        self.manager = manager

    def run(self) -> None:
        while True:
            pending = self.manager.queue.get()
            try:
                self.manager.run_job(pending)
            except Exception:
                # run_job already handles per-job failures; this guards the
                # worker loop itself from dying on unexpected errors.
                logger.exception(
                    "Export queue worker failed while processing %s", pending.id
                )
            finally:
                self.manager.queue.task_done()
class ExportJobManager:
    """Concurrency-limited manager for queued export jobs.

    Owns a bounded Queue of pending ExportJobs, a registry of all known
    jobs by id, and a fixed pool of daemon worker threads sized by
    max_concurrent. Thread-safety: `lock` guards `jobs`, `workers`, and
    `started`; the queue provides its own synchronization.
    """

    def __init__(
        self,
        config: FrigateConfig,
        max_concurrent: int,
        max_queued: int = MAX_QUEUED_EXPORT_JOBS,
    ) -> None:
        self.config = config
        # Clamp to at least one worker / one queue slot so a bad config
        # value cannot produce an unusable manager.
        self.max_concurrent = max(1, max_concurrent)
        self.queue: Queue[ExportJob] = Queue(maxsize=max(1, max_queued))
        # All jobs ever enqueued (queued, running, and finished), by id.
        self.jobs: dict[str, ExportJob] = {}
        self.lock = threading.Lock()
        self.workers: list[ExportQueueWorker] = []
        # Whether the worker pool has been started; see ensure_started().
        self.started = False

    def ensure_started(self) -> None:
        """Ensure worker threads are started exactly once.

        Subsequent calls only check for (and replace) workers that have
        died, so callers may invoke this on every enqueue.
        """
        with self.lock:
            if self.started:
                self._restart_dead_workers_locked()
                return

            for index in range(self.max_concurrent):
                worker = ExportQueueWorker(self, index)
                worker.start()
                self.workers.append(worker)

            self.started = True

    def _restart_dead_workers_locked(self) -> None:
        # Must be called with self.lock held (hence the _locked suffix).
        # Workers are daemon threads; if one died unexpectedly, replace it
        # in place so the pool size stays at max_concurrent.
        for index, worker in enumerate(self.workers):
            if worker.is_alive():
                continue

            logger.error(
                "Export queue worker %s died unexpectedly, restarting", worker.name
            )
            replacement = ExportQueueWorker(self, index)
            replacement.start()
            self.workers[index] = replacement

    def enqueue(self, job: ExportJob) -> str:
        """Queue a job for background execution.

        Raises ExportQueueFullError if the queue is at capacity.
        Returns the job id. Note the job is put on the queue before it is
        registered in `jobs`, so a worker may begin running it immediately;
        run_job does not consult the registry, so this is safe.
        """
        self.ensure_started()

        try:
            self.queue.put_nowait(job)
        except Full as err:
            raise ExportQueueFullError(
                "Export queue is full; try again once current exports finish"
            ) from err

        with self.lock:
            self.jobs[job.id] = job

        return job.id

    def get_job(self, job_id: str) -> Optional[ExportJob]:
        """Get a job by ID."""
        with self.lock:
            return self.jobs.get(job_id)

    def list_active_jobs(self) -> list[ExportJob]:
        """List queued and running jobs."""
        with self.lock:
            return [
                job
                for job in self.jobs.values()
                if job.status in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running)
            ]

    def cancel_queued_jobs_for_case(self, case_id: str) -> list[ExportJob]:
        """Cancel queued export jobs assigned to a deleted case.

        Reaches into Queue internals (under queue.mutex) to filter the
        pending deque in place. Lock order is always self.lock then
        queue.mutex, matching enqueue()'s usage, so no deadlock.
        """
        cancelled_jobs: list[ExportJob] = []

        with self.lock:
            with self.queue.mutex:
                retained_jobs: list[ExportJob] = []

                while self.queue.queue:
                    job = self.queue.queue.popleft()

                    if (
                        job.export_case_id == case_id
                        and job.status == JobStatusTypesEnum.queued
                    ):
                        job.status = JobStatusTypesEnum.cancelled
                        job.end_time = time.time()
                        cancelled_jobs.append(job)
                        continue

                    retained_jobs.append(job)

                self.queue.queue.extend(retained_jobs)

                if cancelled_jobs:
                    # Cancelled jobs were removed without a worker ever
                    # calling task_done() for them, so rebalance the
                    # unfinished-task counter ourselves (clamped at 0)
                    # and wake any join()ers / blocked put()ers.
                    self.queue.unfinished_tasks = max(
                        0,
                        self.queue.unfinished_tasks - len(cancelled_jobs),
                    )
                    if self.queue.unfinished_tasks == 0:
                        self.queue.all_tasks_done.notify_all()

                    self.queue.not_full.notify_all()

        return cancelled_jobs

    def available_slots(self) -> int:
        """Approximate number of additional jobs that could be queued right now.

        Uses Queue.qsize() which is best-effort; callers should treat the
        result as advisory since another thread could enqueue between
        checking and enqueueing.
        """
        return max(0, self.queue.maxsize - self.queue.qsize())

    def run_job(self, job: ExportJob) -> None:
        """Execute a queued export job.

        Runs on a worker thread; mutates `job` in place (status, timestamps,
        results, error_message) rather than returning a value.
        """
        job.status = JobStatusTypesEnum.running
        job.start_time = time.time()

        exporter = RecordingExporter(
            self.config,
            job.id,
            job.camera,
            job.name,
            job.image_path,
            int(job.request_start_time),
            int(job.request_end_time),
            PlaybackSourceEnum(job.playback_source),
            job.export_case_id,
            job.ffmpeg_input_args,
            job.ffmpeg_output_args,
            job.cpu_fallback,
        )

        try:
            exporter.run()

            # Outcome is read back from the DB row the exporter maintains.
            # NOTE(review): assumes the exporter flips in_progress to False
            # only on successful completion — confirm against RecordingExporter.
            export = Export.get_or_none(Export.id == job.id)
            if export is None:
                job.status = JobStatusTypesEnum.failed
                job.error_message = "Export failed"
            elif export.in_progress:
                job.status = JobStatusTypesEnum.failed
                job.error_message = "Export did not complete"
            else:
                job.status = JobStatusTypesEnum.success
                job.results = {
                    "export_id": export.id,
                    "export_case_id": export.export_case_id,
                    "video_path": export.video_path,
                    "thumb_path": export.thumb_path,
                }
        except DoesNotExist:
            job.status = JobStatusTypesEnum.failed
            job.error_message = "Export not found"
        except Exception as err:
            logger.exception("Export job %s failed: %s", job.id, err)
            job.status = JobStatusTypesEnum.failed
            job.error_message = str(err)
        finally:
            job.end_time = time.time()
# Lazily-created process-wide singleton; see get_export_job_manager().
_job_manager: Optional[ExportJobManager] = None
# Serializes creation and lookup of the singleton above.
_job_manager_lock = threading.Lock()
def _get_max_concurrent(config: FrigateConfig) -> int:
    """Read the configured cap on concurrently-running exports."""
    configured_limit = config.record.export.max_concurrent
    return int(configured_limit)
def reap_stale_exports() -> None:
    """Sweep Export rows stuck with in_progress=True from previous sessions.

    At Frigate startup no export job can be alive yet, so any row still
    flagged in_progress must be a leftover from a prior session that
    crashed, was killed mid-export, or bailed out of the exporter without
    clearing the flag. Each stale row is handled one of two ways:

    - if the video file exists on disk and is non-empty, the row is
      recovered: in_progress is cleared and the export is detached from
      any case so the user re-triages it instead of it silently
      reappearing inside a curated case
    - otherwise the row (plus any thumbnail/video remnants) is deleted,
      since there is nothing worth keeping

    Must only be called while the export job manager is guaranteed idle,
    i.e. at startup before any worker runs. All exceptions are caught and
    logged here; a failure on one row does not stop the sweep, and a
    failure in the initial query logs and returns.
    """
    try:
        stale_exports = list(Export.select().where(Export.in_progress == True))  # noqa: E712
    except Exception:
        logger.exception("Failed to query stale in-progress exports")
        return

    if not stale_exports:
        logger.debug("No stale in-progress exports found on startup")
        return

    recovered_count = 0
    deleted_count = 0
    error_count = 0

    def _is_nonempty_file(path: str) -> bool:
        # Missing file, unreadable path, or zero bytes all count as unusable.
        try:
            return os.path.getsize(path) > 0
        except OSError:
            return False

    for stale in stale_exports:
        try:
            video_file = stale.video_path
            usable = bool(video_file) and _is_nonempty_file(video_file)

            if usable:
                # Unassign from any case on recovery: the user should
                # re-triage a recovered export rather than have it silently
                # reappear inside a case they curated.
                Export.update(
                    {Export.in_progress: False, Export.export_case: None}
                ).where(Export.id == stale.id).execute()
                recovered_count += 1
                logger.info(
                    "Recovered stale in-progress export %s (file intact on disk)",
                    stale.id,
                )
            else:
                # Nothing recoverable: remove on-disk remnants, then the row.
                if stale.thumb_path:
                    Path(stale.thumb_path).unlink(missing_ok=True)
                if video_file:
                    Path(video_file).unlink(missing_ok=True)

                Export.delete().where(Export.id == stale.id).execute()
                deleted_count += 1
                logger.info(
                    "Deleted stale in-progress export %s (no usable file on disk)",
                    stale.id,
                )
        except Exception:
            error_count += 1
            logger.exception("Failed to reap stale export %s", stale.id)

    logger.info(
        "Stale export cleanup complete: %d recovered, %d deleted, %d errored",
        recovered_count,
        deleted_count,
        error_count,
    )
def get_export_job_manager(config: FrigateConfig) -> ExportJobManager:
    """Return the process-wide export job manager, creating it on first use.

    Also (re)starts worker threads on every call via ensure_started, so
    callers never observe a manager without a live worker pool.
    """
    global _job_manager

    with _job_manager_lock:
        manager = _job_manager
        if manager is None:
            manager = ExportJobManager(config, _get_max_concurrent(config))
            _job_manager = manager

        manager.ensure_started()
        return manager
def start_export_job(config: FrigateConfig, job: ExportJob) -> str:
    """Queue an export job for background execution and return its ID."""
    manager = get_export_job_manager(config)
    return manager.enqueue(job)
def get_export_job(config: FrigateConfig, job_id: str) -> Optional[ExportJob]:
    """Look up a queued, running, or completed export job by ID."""
    manager = get_export_job_manager(config)
    return manager.get_job(job_id)
def list_active_export_jobs(config: FrigateConfig) -> list[ExportJob]:
    """Return all export jobs that are currently queued or running."""
    manager = get_export_job_manager(config)
    return manager.list_active_jobs()
def cancel_queued_export_jobs_for_case(
    config: FrigateConfig, case_id: str
) -> list[ExportJob]:
    """Cancel queued export jobs that still point at a deleted case."""
    manager = get_export_job_manager(config)
    return manager.cancel_queued_jobs_for_case(case_id)
def available_export_queue_slots(config: FrigateConfig) -> int:
    """Best-effort count of additional export jobs that could be queued now."""
    manager = get_export_job_manager(config)
    return manager.available_slots()

View File

@ -152,21 +152,12 @@ class OnvifController:
cam = self.camera_configs[cam_name] cam = self.camera_configs[cam_name]
try: try:
user = cam.onvif.user
password = cam.onvif.password
if user is not None and isinstance(user, bytes):
user = user.decode("utf-8")
if password is not None and isinstance(password, bytes):
password = password.decode("utf-8")
self.cams[cam_name] = { self.cams[cam_name] = {
"onvif": ONVIFCamera( "onvif": ONVIFCamera(
cam.onvif.host, cam.onvif.host,
cam.onvif.port, cam.onvif.port,
user, cam.onvif.user,
password, cam.onvif.password,
wsdl_dir=str(Path(find_spec("onvif").origin).parent / "wsdl"), wsdl_dir=str(Path(find_spec("onvif").origin).parent / "wsdl"),
adjust_time=cam.onvif.ignore_time_mismatch, adjust_time=cam.onvif.ignore_time_mismatch,
encrypt=not cam.onvif.tls_insecure, encrypt=not cam.onvif.tls_insecure,
@ -459,15 +450,15 @@ class OnvifController:
presets = [] presets = []
for preset in presets: for preset in presets:
# Ensure preset name is a Unicode string and handle UTF-8 characters correctly
preset_name = getattr(preset, "Name") or f"preset {preset['token']}" preset_name = getattr(preset, "Name") or f"preset {preset['token']}"
# Some cameras (e.g. Reolink) return UTF-8 bytes that zeep decodes
if isinstance(preset_name, bytes): # as latin-1, producing mojibake. Detect that and repair it by
preset_name = preset_name.decode("utf-8") # round-tripping through latin-1 -> utf-8.
try:
# Convert to lowercase while preserving UTF-8 characters preset_name = preset_name.encode("latin-1").decode("utf-8")
preset_name_lower = preset_name.lower() except (UnicodeEncodeError, UnicodeDecodeError):
self.cams[camera_name]["presets"][preset_name_lower] = preset["token"] pass
self.cams[camera_name]["presets"][preset_name.lower()] = preset["token"]
# get list of supported features # get list of supported features
supported_features = [] supported_features = []
@ -695,9 +686,6 @@ class OnvifController:
self.cams[camera_name]["active"] = False self.cams[camera_name]["active"] = False
async def _move_to_preset(self, camera_name: str, preset: str) -> None: async def _move_to_preset(self, camera_name: str, preset: str) -> None:
if isinstance(preset, bytes):
preset = preset.decode("utf-8")
preset = preset.lower() preset = preset.lower()
if preset not in self.cams[camera_name]["presets"]: if preset not in self.cams[camera_name]["presets"]:

View File

@ -372,6 +372,7 @@ class RecordingMaintainer(threading.Thread):
) )
record_config = self.config.cameras[camera].record record_config = self.config.cameras[camera].record
segment_stats: SegmentInfo | None = None
highest = None highest = None
if record_config.continuous.days > 0: if record_config.continuous.days > 0:
@ -401,9 +402,19 @@ class RecordingMaintainer(threading.Thread):
if highest == "continuous" if highest == "continuous"
else RetainModeEnum.motion else RetainModeEnum.motion
) )
return await self.move_segment( segment_stats = self.segment_stats(camera, start_time, end_time)
camera, start_time, end_time, duration, cache_path, record_mode
) # Here we only check if we should move the segment based on non-object recording retention
# we will always want to check for overlapping review items below before dropping the segment
if not segment_stats.should_discard_segment(record_mode):
return await self.move_segment(
camera,
start_time,
end_time,
duration,
cache_path,
segment_stats,
)
# we fell through the continuous / motion check, so we need to check the review items # we fell through the continuous / motion check, so we need to check the review items
# if the cached segment overlaps with the review items: # if the cached segment overlaps with the review items:
@ -435,15 +446,24 @@ class RecordingMaintainer(threading.Thread):
if review.severity == "alert" if review.severity == "alert"
else record_config.detections.retain.mode else record_config.detections.retain.mode
) )
# move from cache to recordings immediately
return await self.move_segment( if segment_stats is None:
camera, segment_stats = self.segment_stats(camera, start_time, end_time)
start_time,
end_time, if not segment_stats.should_discard_segment(record_mode):
duration, # move from cache to recordings immediately
cache_path, return await self.move_segment(
record_mode, camera,
) start_time,
end_time,
duration,
cache_path,
segment_stats,
)
else:
self.drop_segment(cache_path)
return None
# if it doesn't overlap with an review item, go ahead and drop the segment # if it doesn't overlap with an review item, go ahead and drop the segment
# if it ends more than the configured pre_capture for the camera # if it ends more than the configured pre_capture for the camera
# BUT only if continuous/motion is NOT enabled (otherwise wait for processing) # BUT only if continuous/motion is NOT enabled (otherwise wait for processing)
@ -455,6 +475,7 @@ class RecordingMaintainer(threading.Thread):
retain_cutoff = datetime.datetime.fromtimestamp( retain_cutoff = datetime.datetime.fromtimestamp(
most_recently_processed_frame_time - record_config.event_pre_capture most_recently_processed_frame_time - record_config.event_pre_capture
).astimezone(datetime.timezone.utc) ).astimezone(datetime.timezone.utc)
if end_time < retain_cutoff: if end_time < retain_cutoff:
self.drop_segment(cache_path) self.drop_segment(cache_path)
@ -578,15 +599,8 @@ class RecordingMaintainer(threading.Thread):
end_time: datetime.datetime, end_time: datetime.datetime,
duration: float, duration: float,
cache_path: str, cache_path: str,
store_mode: RetainModeEnum, segment_info: SegmentInfo,
) -> Optional[dict[str, Any]]: ) -> Optional[dict[str, Any]]:
segment_info = self.segment_stats(camera, start_time, end_time)
# check if the segment shouldn't be stored
if segment_info.should_discard_segment(store_mode):
self.drop_segment(cache_path)
return None
# directory will be in utc due to start_time being in utc # directory will be in utc due to start_time being in utc
directory = os.path.join( directory = os.path.join(
RECORD_DIR, RECORD_DIR,

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,303 @@
"""Tests for the find_similar_objects chat tool."""
import asyncio
import os
import tempfile
import unittest
from types import SimpleNamespace
from unittest.mock import MagicMock
from playhouse.sqlite_ext import SqliteExtDatabase
from frigate.api.chat import (
_execute_find_similar_objects,
get_tool_definitions,
)
from frigate.api.chat_util import (
DESCRIPTION_WEIGHT,
VISUAL_WEIGHT,
distance_to_score,
fuse_scores,
)
from frigate.embeddings.util import ZScoreNormalization
from frigate.models import Event
def _run(coro):
return asyncio.new_event_loop().run_until_complete(coro)
class TestDistanceToScore(unittest.TestCase):
    """distance_to_score maps raw embedding distances into the [0, 1] range."""

    def test_lower_distance_gives_higher_score(self):
        """A closer match must score higher, and all scores stay in [0, 1]."""
        norm = ZScoreNormalization()
        # Seed with a spread of distances so the stddev is non-zero.
        norm._update([0.1, 0.2, 0.3, 0.4, 0.5])
        near = distance_to_score(0.1, norm)
        far = distance_to_score(0.5, norm)
        self.assertGreater(near, far)
        for score in (near, far):
            self.assertGreaterEqual(score, 0.0)
            self.assertLessEqual(score, 1.0)

    def test_uninitialized_stats_returns_neutral_score(self):
        """With no samples (n == 0, stddev == 0) the score falls back to 0.5."""
        self.assertEqual(distance_to_score(0.3, ZScoreNormalization()), 0.5)
class TestFuseScores(unittest.TestCase):
    """fuse_scores blends visual and description similarity scores."""

    def test_weights_sum_to_one(self):
        """The two fusion weights must form a convex combination."""
        self.assertAlmostEqual(VISUAL_WEIGHT + DESCRIPTION_WEIGHT, 1.0)

    def test_fuses_both_sides(self):
        """When both scores are present, the result is the weighted sum."""
        expected = VISUAL_WEIGHT * 0.8 + DESCRIPTION_WEIGHT * 0.4
        self.assertAlmostEqual(
            fuse_scores(visual_score=0.8, description_score=0.4), expected
        )

    def test_missing_description_uses_visual_only(self):
        """A missing description score leaves the visual score unweighted."""
        self.assertAlmostEqual(
            fuse_scores(visual_score=0.7, description_score=None), 0.7
        )

    def test_missing_visual_uses_description_only(self):
        """A missing visual score leaves the description score unweighted."""
        self.assertAlmostEqual(
            fuse_scores(visual_score=None, description_score=0.6), 0.6
        )

    def test_both_missing_returns_none(self):
        """With neither side available there is nothing to fuse."""
        self.assertIsNone(fuse_scores(visual_score=None, description_score=None))
class TestToolDefinition(unittest.TestCase):
    """The find_similar_objects chat tool is registered with the expected schema."""

    def test_find_similar_objects_is_registered(self):
        """The tool name appears in the registered tool definitions."""
        registered = {t["function"]["name"] for t in get_tool_definitions()}
        self.assertIn("find_similar_objects", registered)

    def test_find_similar_objects_schema(self):
        """The parameter schema exposes every filter and only requires event_id."""
        tool = next(
            t
            for t in get_tool_definitions()
            if t["function"]["name"] == "find_similar_objects"
        )
        schema = tool["function"]["parameters"]
        props = schema["properties"]
        for param in (
            "event_id",
            "after",
            "before",
            "cameras",
            "labels",
            "sub_labels",
            "zones",
            "similarity_mode",
            "min_score",
            "limit",
        ):
            self.assertIn(param, props)
        self.assertEqual(schema["required"], ["event_id"])
        self.assertEqual(
            props["similarity_mode"]["enum"], ["visual", "semantic", "fused"]
        )
class TestExecuteFindSimilarObjects(unittest.TestCase):
    """End-to-end tests for _execute_find_similar_objects.

    The Event table is backed by a real throwaway SQLite database so the
    tool's peewee queries execute for real; the embeddings layer is a
    MagicMock so no model inference happens.
    """

    def setUp(self):
        # Bind the Event model to a fresh on-disk SQLite database per test.
        self.tmp = tempfile.NamedTemporaryFile(suffix=".db", delete=False)
        self.tmp.close()
        self.db = SqliteExtDatabase(self.tmp.name)
        Event.bind(self.db, bind_refs=False, bind_backrefs=False)
        self.db.connect()
        self.db.create_tables([Event])

        # Insert an anchor plus two candidates.
        def make(event_id, label="car", camera="driveway", start=1_700_000_100):
            # Minimal but complete Event row; only id/label/camera/start vary.
            Event.create(
                id=event_id,
                label=label,
                sub_label=None,
                camera=camera,
                start_time=start,
                end_time=start + 10,
                top_score=0.9,
                score=0.9,
                false_positive=False,
                zones=[],
                thumbnail="",
                has_clip=True,
                has_snapshot=True,
                region=[0, 0, 1, 1],
                box=[0, 0, 1, 1],
                area=1,
                retain_indefinitely=False,
                ratio=1.0,
                plus_id="",
                model_hash="",
                detector_type="",
                model_type="",
                data={"description": "a green sedan"},
            )

        make("anchor", start=1_700_000_200)
        make("cand_a", start=1_700_000_100)
        make("cand_b", start=1_700_000_150)
        # Keep the factory so individual tests can create extra events.
        self.make = make

    def tearDown(self):
        # Close and delete the per-test database file.
        self.db.close()
        os.unlink(self.tmp.name)

    def _make_request(self, semantic_enabled=True, embeddings=None):
        # Minimal FastAPI-style request exposing only the attributes the
        # tool reads: request.app.embeddings and request.app.frigate_config.
        app = SimpleNamespace(
            embeddings=embeddings,
            frigate_config=SimpleNamespace(
                semantic_search=SimpleNamespace(enabled=semantic_enabled),
            ),
        )
        return SimpleNamespace(app=app)

    def test_semantic_search_disabled_returns_error(self):
        """With semantic search off, the tool returns a structured error."""
        req = self._make_request(semantic_enabled=False)
        result = _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor"},
                allowed_cameras=["driveway"],
            )
        )
        self.assertEqual(result["error"], "semantic_search_disabled")

    def test_anchor_not_found_returns_error(self):
        """An unknown anchor event id yields an anchor_not_found error."""
        embeddings = MagicMock()
        req = self._make_request(embeddings=embeddings)
        result = _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "nope"},
                allowed_cameras=["driveway"],
            )
        )
        self.assertEqual(result["error"], "anchor_not_found")

    def test_empty_candidates_returns_empty_results(self):
        """No matching candidates produces an empty, non-truncated result set."""
        embeddings = MagicMock()
        req = self._make_request(embeddings=embeddings)
        # Filter to a camera with no other events.
        result = _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor", "cameras": ["nonexistent_cam"]},
                allowed_cameras=["nonexistent_cam"],
            )
        )
        self.assertEqual(result["results"], [])
        self.assertFalse(result["candidate_truncated"])
        self.assertEqual(result["anchor"]["id"], "anchor")

    def test_fused_calls_both_searches_and_ranks(self):
        """Default (fused) mode queries both indexes and weights visual higher."""
        embeddings = MagicMock()
        # cand_a visually closer, cand_b semantically closer.
        embeddings.search_thumbnail.return_value = [
            ("cand_a", 0.10),
            ("cand_b", 0.40),
        ]
        embeddings.search_description.return_value = [
            ("cand_a", 0.50),
            ("cand_b", 0.20),
        ]
        embeddings.thumb_stats = ZScoreNormalization()
        embeddings.thumb_stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
        embeddings.desc_stats = ZScoreNormalization()
        embeddings.desc_stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
        req = self._make_request(embeddings=embeddings)
        result = _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor"},
                allowed_cameras=["driveway"],
            )
        )
        embeddings.search_thumbnail.assert_called_once()
        embeddings.search_description.assert_called_once()
        # cand_a should rank first because visual is weighted higher.
        self.assertEqual(result["results"][0]["id"], "cand_a")
        self.assertIn("score", result["results"][0])
        self.assertEqual(result["similarity_mode"], "fused")

    def test_visual_mode_only_calls_thumbnail(self):
        """Visual mode must not touch the description index."""
        embeddings = MagicMock()
        embeddings.search_thumbnail.return_value = [("cand_a", 0.1)]
        embeddings.thumb_stats = ZScoreNormalization()
        embeddings.thumb_stats._update([0.1, 0.2, 0.3])
        req = self._make_request(embeddings=embeddings)
        _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor", "similarity_mode": "visual"},
                allowed_cameras=["driveway"],
            )
        )
        embeddings.search_thumbnail.assert_called_once()
        embeddings.search_description.assert_not_called()

    def test_semantic_mode_only_calls_description(self):
        """Semantic mode must not touch the thumbnail index."""
        embeddings = MagicMock()
        embeddings.search_description.return_value = [("cand_a", 0.1)]
        embeddings.desc_stats = ZScoreNormalization()
        embeddings.desc_stats._update([0.1, 0.2, 0.3])
        req = self._make_request(embeddings=embeddings)
        _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor", "similarity_mode": "semantic"},
                allowed_cameras=["driveway"],
            )
        )
        embeddings.search_description.assert_called_once()
        embeddings.search_thumbnail.assert_not_called()

    def test_min_score_drops_low_scoring_results(self):
        """Results scoring below min_score are filtered out of the response."""
        embeddings = MagicMock()
        embeddings.search_thumbnail.return_value = [
            ("cand_a", 0.10),
            ("cand_b", 0.90),
        ]
        embeddings.search_description.return_value = []
        embeddings.thumb_stats = ZScoreNormalization()
        embeddings.thumb_stats._update([0.1, 0.2, 0.3, 0.4, 0.5])
        embeddings.desc_stats = ZScoreNormalization()
        req = self._make_request(embeddings=embeddings)
        result = _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor", "similarity_mode": "visual", "min_score": 0.6},
                allowed_cameras=["driveway"],
            )
        )
        ids = [r["id"] for r in result["results"]]
        self.assertIn("cand_a", ids)
        self.assertNotIn("cand_b", ids)

    def test_labels_defaults_to_anchor_label(self):
        """Without an explicit labels filter, candidates keep the anchor's label."""
        self.make("person_a", label="person")
        embeddings = MagicMock()
        embeddings.search_thumbnail.return_value = [
            ("cand_a", 0.1),
            ("cand_b", 0.2),
        ]
        embeddings.search_description.return_value = []
        embeddings.thumb_stats = ZScoreNormalization()
        embeddings.thumb_stats._update([0.1, 0.2, 0.3])
        embeddings.desc_stats = ZScoreNormalization()
        req = self._make_request(embeddings=embeddings)
        result = _run(
            _execute_find_similar_objects(
                req,
                {"event_id": "anchor", "similarity_mode": "visual"},
                allowed_cameras=["driveway"],
            )
        )
        ids = [r["id"] for r in result["results"]]
        # The "person" event must not appear among "car" candidates.
        self.assertNotIn("person_a", ids)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,211 @@
"""Tests for DeferredRealtimeProcessorApi."""
import sys
import time
import unittest
from typing import Any
from unittest.mock import MagicMock, patch
import numpy as np
from frigate.data_processing.real_time.api import DeferredRealtimeProcessorApi
# Mock TFLite before importing classification module
_MOCK_MODULES = [
"tflite_runtime",
"tflite_runtime.interpreter",
"ai_edge_litert",
"ai_edge_litert.interpreter",
]
for mod in _MOCK_MODULES:
if mod not in sys.modules:
sys.modules[mod] = MagicMock()
from frigate.data_processing.real_time.custom_classification import ( # noqa: E402
CustomObjectClassificationProcessor,
)
class StubDeferredProcessor(DeferredRealtimeProcessorApi):
"""Minimal concrete subclass for testing the deferred base."""
def __init__(self, max_queue: int = 8):
config = MagicMock()
metrics = MagicMock()
super().__init__(config, metrics, max_queue=max_queue)
self.processed_items: list[tuple] = []
def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None:
"""Enqueue every call — no gating logic in the stub."""
self._enqueue_task(("frame", obj_data, frame.copy()))
def _process_task(self, task: tuple) -> None:
kind = task[0]
if kind == "frame":
_, obj_data, frame = task
self.processed_items.append((obj_data["id"], frame.shape))
self._emit_result(
{
"type": "test_result",
"id": obj_data["id"],
"label": "cat",
"score": 0.95,
}
)
elif kind == "expire":
_, object_id = task
self.processed_items.append(("expired", object_id))
def handle_request(
self, topic: str, request_data: dict[str, Any]
) -> dict[str, Any] | None:
if topic == "reload":
def _do_reload(data):
return {"success": True, "model": data.get("name")}
return self._enqueue_request(_do_reload, request_data)
return None
def expire_object(self, object_id: str, camera: str) -> None:
self._enqueue_task(("expire", object_id))
class TestDeferredProcessorBase(unittest.TestCase):
    """Behavioural tests for the deferred (worker-thread) processor base.

    NOTE(review): these tests synchronize with the worker via fixed
    time.sleep() calls, which can be flaky on slow CI machines — consider
    bounded polling if flakes appear.
    """

    def test_enqueue_and_drain(self):
        """Tasks enqueued on main thread are processed by worker, results are drainable."""
        proc = StubDeferredProcessor()
        frame = np.zeros((100, 100, 3), dtype=np.uint8)
        proc.process_frame({"id": "obj1"}, frame)
        proc.process_frame({"id": "obj2"}, frame)

        # Give the worker time to process
        time.sleep(0.1)

        results = proc.drain_results()
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0]["id"], "obj1")
        self.assertEqual(results[1]["id"], "obj2")

        # Second drain should be empty
        self.assertEqual(len(proc.drain_results()), 0)

    def test_backpressure_drops_tasks(self):
        """When queue is full, new tasks are silently dropped."""
        proc = StubDeferredProcessor(max_queue=2)
        frame = np.zeros((10, 10, 3), dtype=np.uint8)
        for i in range(10):
            proc.process_frame({"id": f"obj{i}"}, frame)
        time.sleep(0.2)
        results = proc.drain_results()
        # The key property: no crash, no unbounded growth
        self.assertLessEqual(len(results), 10)
        self.assertGreater(len(results), 0)

    def test_handle_request_through_worker(self):
        """handle_request blocks until the worker processes it and returns a response."""
        proc = StubDeferredProcessor()
        result = proc.handle_request("reload", {"name": "my_model"})
        self.assertEqual(result, {"success": True, "model": "my_model"})

    def test_expire_object_serialized_with_work(self):
        """expire_object goes through the queue, serialized with inference work."""
        proc = StubDeferredProcessor()
        frame = np.zeros((10, 10, 3), dtype=np.uint8)
        proc.process_frame({"id": "obj1"}, frame)
        proc.expire_object("obj1", "front_door")
        time.sleep(0.1)
        # Both should have been processed in order
        self.assertEqual(len(proc.processed_items), 2)
        self.assertEqual(proc.processed_items[0][0], "obj1")
        self.assertEqual(proc.processed_items[1], ("expired", "obj1"))

    def test_shutdown_joins_worker(self):
        """shutdown() signals the worker to stop and joins the thread."""
        proc = StubDeferredProcessor()
        proc.shutdown()
        self.assertFalse(proc._worker.is_alive())

    def test_drain_results_returns_list(self):
        """drain_results returns a plain list, not a deque."""
        proc = StubDeferredProcessor()
        results = proc.drain_results()
        self.assertIsInstance(results, list)
class TestCustomObjectClassificationDeferred(unittest.TestCase):
    """Test that CustomObjectClassificationProcessor uses the deferred pattern correctly."""

    def _make_processor(self):
        # Build a processor with every collaborator mocked out; no real
        # model is loaded and no ZMQ sockets are opened.
        config = MagicMock()
        model_config = MagicMock()
        model_config.name = "test_breed"
        model_config.object_config = MagicMock()
        model_config.object_config.objects = ["dog"]
        model_config.threshold = 0.5
        model_config.save_attempts = 10
        model_config.object_config.classification_type = "sub_label"
        publisher = MagicMock()
        requestor = MagicMock()
        metrics = MagicMock()
        metrics.classification_speeds = {}
        metrics.classification_cps = {}
        # Patch the name-mangled private __build_detector so the
        # constructor does not try to load a TFLite model from disk.
        with patch.object(
            CustomObjectClassificationProcessor,
            "_CustomObjectClassificationProcessor__build_detector",
        ):
            proc = CustomObjectClassificationProcessor(
                config, model_config, publisher, requestor, metrics
            )
        # Replace detector internals with inert stand-ins.
        proc.interpreter = None
        proc.tensor_input_details = [{"index": 0}]
        proc.tensor_output_details = [{"index": 0}]
        proc.labelmap = {0: "labrador", 1: "poodle", 2: "none"}
        return proc

    def test_is_deferred_processor(self):
        """CustomObjectClassificationProcessor should be a DeferredRealtimeProcessorApi."""
        proc = self._make_processor()
        self.assertIsInstance(proc, DeferredRealtimeProcessorApi)

    def test_expire_clears_history(self):
        """expire_object should clear classification history for the object."""
        proc = self._make_processor()
        proc.classification_history["obj1"] = [("labrador", 0.9, 1.0)]
        proc.expire_object("obj1", "front")
        # Expiry is handled on the worker thread; give it time to run.
        time.sleep(0.1)
        self.assertNotIn("obj1", proc.classification_history)

    def test_drain_results_empty_when_no_model(self):
        """With no interpreter, process_frame saves training images but emits no results."""
        proc = self._make_processor()
        proc.interpreter = None
        frame = np.zeros((150, 100), dtype=np.uint8)
        obj_data = {
            "id": "obj1",
            "label": "dog",
            "false_positive": False,
            "end_time": None,
            "box": [10, 10, 50, 50],
            "camera": "front",
        }
        # Keep the patch active while the worker thread processes the frame,
        # so the training-image write is intercepted.
        with patch(
            "frigate.data_processing.real_time.custom_classification.write_classification_attempt"
        ):
            proc.process_frame(obj_data, frame)
            time.sleep(0.1)
        results = proc.drain_results()
        self.assertEqual(len(results), 0)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()

View File

@ -62,11 +62,12 @@ def get_camera_regions_grid(
.where((Event.false_positive == None) | (Event.false_positive == False)) .where((Event.false_positive == None) | (Event.false_positive == False))
.where(Event.start_time > last_update) .where(Event.start_time > last_update)
) )
valid_event_ids = [e["id"] for e in events.dicts()]
logger.debug(f"Found {len(valid_event_ids)} new events for {name}") event_count = events.count()
logger.debug(f"Found {event_count} new events for {name}")
# no new events, return as is # no new events, return as is
if not valid_event_ids: if event_count == 0:
return grid return grid
new_update = datetime.datetime.now().timestamp() new_update = datetime.datetime.now().timestamp()
@ -78,7 +79,7 @@ def get_camera_regions_grid(
Timeline.data, Timeline.data,
] ]
) )
.where(Timeline.source_id << valid_event_ids) .where(Timeline.source_id << events)
.limit(10000) .limit(10000)
.dicts() .dicts()
) )

View File

@ -0,0 +1,116 @@
/**
* Global allowlist of regex patterns that the error collector ignores.
*
* Each entry MUST include a comment explaining what it silences and why.
* The allowlist is filtered at collection time, so failure messages list
* only unfiltered errors.
*
* Per-spec additions go through the `expectedErrors` test fixture parameter
* (see error-collector.ts), not by editing this file. That keeps allowlist
* drift visible per-PR rather than buried in shared infrastructure.
*
* NOTE ON CONSOLE vs REQUEST ERRORS:
* When a network request returns a 5xx response, the browser emits two
* events that the error collector captures:
* [request] "500 Internal Server Error <url>" from onResponse (URL included)
* [console] "Failed to load resource: ..." from onConsole (URL NOT included)
*
* The request-level message includes the URL, so those patterns are specific.
* The console-level message text (from ConsoleMessage.text()) does NOT include
 * the URL — the URL is stored separately in e.url. Therefore the console
* pattern for HTTP 500s cannot be URL-discriminated, and a single pattern
* covers all such browser echoes. This is safe because every such console
* error is already caught (and specifically matched) by its paired [request]
* entry below.
*/
export const GLOBAL_ALLOWLIST: RegExp[] = [
  // (Entries are matched with RegExp.test against the collected error
  // message string — see isAllowlisted in error-collector.ts.)
  // -------------------------------------------------------------------------
  // Browser echo of HTTP 5xx responses (console mirror of [request] events).
  //
  // Whenever the browser receives a 5xx response it emits a console error:
  //   "Failed to load resource: the server responded with a status of 500
  //    (Internal Server Error)"
  // The URL is NOT part of ConsoleMessage.text() — it is stored separately.
  // Every console error of this form is therefore paired with a specific
  // [request] 500 entry below that names the exact endpoint. Allowlisting
  // this pattern here silences the browser echo; the request-level entries
  // enforce specificity.
  // -------------------------------------------------------------------------
  /Failed to load resource: the server responded with a status of 500/,

  // -------------------------------------------------------------------------
  // Mock infrastructure gaps — API endpoints not yet covered by ApiMocker.
  //
  // These produce 500s because Vite's preview server has no handler for them.
  // Each is a TODO(real-bug): the mock should be extended so these endpoints
  // return sensible fixture data in tests.
  //
  // Only [request] patterns are listed here; the paired [console] mirror is
  // covered by the "Failed to load resource" entry above.
  // -------------------------------------------------------------------------

  // TODO(real-bug): ApiMocker registers "**/api/reviews**" (plural) but the
  // app fetches /api/review (singular) for the review list and timeline.
  // Affects: review.spec.ts, navigation.spec.ts, live.spec.ts, auth.spec.ts.
  // Fix: add route handlers for /api/review and /api/review/** in api-mocker.ts.
  /500 Internal Server Error.*\/api\/review(\?|\/|$)/,

  // TODO(real-bug): /api/stats/history is not mocked; the system page fetches
  // it for the detector/process history charts.
  // Fix: add route handler for /api/stats/history in api-mocker.ts.
  /500 Internal Server Error.*\/api\/stats\/history/,

  // TODO(real-bug): /api/event_ids is not mocked; the explore/search page
  // fetches it to resolve event IDs for display.
  // Fix: add route handler for /api/event_ids in api-mocker.ts.
  /500 Internal Server Error.*\/api\/event_ids/,

  // TODO(real-bug): /api/sub_labels?split_joined=1 returns 500; the mock
  // registers "**/api/sub_labels" which may not match when a query string is
  // present, or route registration order causes the catch-all to win first.
  // Fix: change the mock route to "**/api/sub_labels**" in api-mocker.ts.
  /500 Internal Server Error.*\/api\/sub_labels/,

  // TODO(real-bug): MediaMocker handles /api/*/latest.jpg but the app also
  // requests /api/*/latest.webp (webp format) for camera snapshots.
  // Affects: live.spec.ts, review.spec.ts, auth.spec.ts, navigation.spec.ts.
  // Fix: add route handler for /api/*/latest.webp in MediaMocker.install().
  /500 Internal Server Error.*\/api\/[^/]+\/latest\.webp/,
  /failed: net::ERR_ABORTED.*\/api\/[^/]+\/latest\.webp/,

  // -------------------------------------------------------------------------
  // Mock infrastructure gap — WebSocket streams.
  //
  // Playwright's page.route() does not intercept WebSocket connections.
  // The jsmpeg live-stream WS connections to /live/jsmpeg/* always fail
  // with a 500 handshake error because the Vite preview server has no WS
  // handler. TODO(real-bug): add WsMocker support for jsmpeg WebSocket
  // connections, or suppress the connection attempt in the test environment.
  // Affects: live.spec.ts (single camera view), auth.spec.ts.
  // -------------------------------------------------------------------------
  /WebSocket connection to '.*\/live\/jsmpeg\/.*' failed/,

  // -------------------------------------------------------------------------
  // Benign — lazy-loaded chunk aborts during navigation.
  //
  // When a test navigates away from a page while the browser is still
  // fetching lazily-split JS/CSS asset chunks, the in-flight fetch is
  // cancelled (net::ERR_ABORTED). This is normal browser behaviour on
  // navigation and does not indicate a real error; the assets load fine
  // on a stable connection.
  // -------------------------------------------------------------------------
  /failed: net::ERR_ABORTED.*\/assets\//,

  // -------------------------------------------------------------------------
  // Real app bug — Radix UI DialogContent missing accessible title.
  //
  // TODO(real-bug): A dialog somewhere in the app renders <DialogContent>
  // without a <DialogTitle>, violating Radix UI's accessibility contract.
  // The warning originates from the bundled main-*.js. Investigate which
  // dialog component is missing the title and add a VisuallyHidden DialogTitle.
  // Likely candidate: face-library or search-detail dialog in explore page.
  // See: https://radix-ui.com/primitives/docs/components/dialog
  // -------------------------------------------------------------------------
  /`DialogContent` requires a `DialogTitle`/,
];

View File

@ -0,0 +1,122 @@
/**
* Collects console errors, page errors, and failed network requests
* during a Playwright test, with regex-based allowlist filtering.
*
* Usage:
* const collector = installErrorCollector(page, [...GLOBAL_ALLOWLIST]);
* // ... run test ...
* collector.assertClean(); // throws if any non-allowlisted error
*
* The collector is wired into the `frigateApp` fixture so every test
* gets it for free. Tests that intentionally trigger an error pass
* additional regexes via the `expectedErrors` fixture parameter.
*/
import type { Page, Request, Response, ConsoleMessage } from "@playwright/test";
/** One captured error event, normalized across the three capture channels. */
export type CollectedError = {
  // Which listener produced the entry: console error, uncaught page error,
  // or a failed/5xx network request.
  kind: "console" | "pageerror" | "request";
  message: string;
  // Request URL or console source URL, when available.
  url?: string;
  // First "at ..." stack frame for pageerror entries.
  stack?: string;
};

/** Handle returned by installErrorCollector: inspect or assert on errors. */
export type ErrorCollector = {
  errors: CollectedError[];
  assertClean(): void;
};
/** True when `message` matches at least one allowlist pattern. */
function isAllowlisted(message: string, allowlist: RegExp[]): boolean {
  for (const pattern of allowlist) {
    if (pattern.test(message)) {
      return true;
    }
  }
  return false;
}
/**
 * Extract the first "at ..." frame from a stack trace, skipping the
 * leading error-message line. Returns undefined when no frame is found.
 */
function firstStackFrame(stack: string | undefined): string | undefined {
  if (!stack) return undefined;
  for (const raw of stack.split("\n")) {
    const line = raw.trim();
    if (line.startsWith("at ")) {
      return line;
    }
  }
  return undefined;
}
/**
 * True when `url` shares an origin with `baseURL`. With no baseURL we
 * cannot discriminate, so everything is treated as same-origin; an
 * unparseable URL is treated as cross-origin.
 */
function isSameOrigin(url: string, baseURL: string | undefined): boolean {
  if (!baseURL) return true;
  let matches: boolean;
  try {
    matches = new URL(url).origin === new URL(baseURL).origin;
  } catch {
    matches = false;
  }
  return matches;
}
/**
 * Wire console/pageerror/response/requestfailed listeners onto `page` and
 * return a collector whose `assertClean()` throws a formatted error when
 * any non-allowlisted error was captured.
 */
export function installErrorCollector(
  page: Page,
  allowlist: RegExp[],
): ErrorCollector {
  const errors: CollectedError[] = [];
  // NOTE(review): reaches into Playwright's private BrowserContext._options
  // to recover the configured baseURL — fragile across Playwright versions;
  // confirm there is no public accessor before upgrading.
  const baseURL = (
    page.context() as unknown as { _options?: { baseURL?: string } }
  )._options?.baseURL;

  // Console channel: only messages with type() === "error" are captured.
  const onConsole = (msg: ConsoleMessage) => {
    if (msg.type() !== "error") return;
    const text = msg.text();
    if (isAllowlisted(text, allowlist)) return;
    errors.push({
      kind: "console",
      message: text,
      url: msg.location().url,
    });
  };

  // Uncaught exceptions thrown inside the page.
  const onPageError = (err: Error) => {
    const text = err.message;
    if (isAllowlisted(text, allowlist)) return;
    errors.push({
      kind: "pageerror",
      message: text,
      stack: firstStackFrame(err.stack),
    });
  };

  // 5xx responses, restricted to the app's own origin.
  const onResponse = (response: Response) => {
    const status = response.status();
    if (status < 500) return;
    const url = response.url();
    if (!isSameOrigin(url, baseURL)) return;
    const text = `${status} ${response.statusText()} ${url}`;
    if (isAllowlisted(text, allowlist)) return;
    errors.push({ kind: "request", message: text, url });
  };

  // Requests that never completed (aborted, DNS failure, ...).
  const onRequestFailed = (request: Request) => {
    const url = request.url();
    if (!isSameOrigin(url, baseURL)) return;
    const failure = request.failure();
    const text = `failed: ${failure?.errorText ?? "unknown"} ${url}`;
    if (isAllowlisted(text, allowlist)) return;
    errors.push({ kind: "request", message: text, url });
  };

  page.on("console", onConsole);
  page.on("pageerror", onPageError);
  page.on("response", onResponse);
  page.on("requestfailed", onRequestFailed);

  return {
    errors,
    assertClean() {
      if (errors.length === 0) return;
      // Build a numbered, human-readable list for the failure message.
      const formatted = errors
        .map((e, i) => {
          const stack = e.stack ? `\n ${e.stack}` : "";
          const url = e.url && e.url !== e.message ? ` (${e.url})` : "";
          return ` ${i + 1}. [${e.kind}] ${e.message}${url}${stack}`;
        })
        .join("\n");
      throw new Error(
        `Page emitted ${errors.length} unexpected error${errors.length === 1 ? "" : "s"}:\n${formatted}`,
      );
    },
  };
}

View File

@ -6,6 +6,11 @@
* @playwright/test directly. The `frigateApp` fixture provides a * @playwright/test directly. The `frigateApp` fixture provides a
* fully mocked Frigate frontend ready for interaction. * fully mocked Frigate frontend ready for interaction.
* *
* The fixture also installs the error collector (see error-collector.ts).
* Any console error, page error, or same-origin failed request that is
* not on the global allowlist or the test's `expectedErrors` list will
* fail the test in the fixture's teardown.
*
* CRITICAL: All route/WS handlers are registered before page.goto() * CRITICAL: All route/WS handlers are registered before page.goto()
* to prevent AuthProvider from redirecting to login.html. * to prevent AuthProvider from redirecting to login.html.
*/ */
@ -17,6 +22,8 @@ import {
type ApiMockOverrides, type ApiMockOverrides,
} from "../helpers/api-mocker"; } from "../helpers/api-mocker";
import { WsMocker } from "../helpers/ws-mocker"; import { WsMocker } from "../helpers/ws-mocker";
import { installErrorCollector, type ErrorCollector } from "./error-collector";
import { GLOBAL_ALLOWLIST } from "./error-allowlist";
export class FrigateApp { export class FrigateApp {
public api: ApiMocker; public api: ApiMocker;
@ -67,10 +74,43 @@ export class FrigateApp {
type FrigateFixtures = { type FrigateFixtures = {
frigateApp: FrigateApp; frigateApp: FrigateApp;
/**
* Per-test additional allowlist regex patterns. Tests that intentionally
* trigger errors (e.g. error-state tests that hit a mocked 500) declare
* their expected errors here so the collector ignores them.
*
* Default is `[]` most tests should not need this.
*/
expectedErrors: RegExp[];
errorCollector: ErrorCollector;
}; };
export const test = base.extend<FrigateFixtures>({ export const test = base.extend<FrigateFixtures>({
frigateApp: async ({ page }, use, testInfo) => { expectedErrors: [[], { option: true }],
errorCollector: async ({ page, expectedErrors }, use, testInfo) => {
const collector = installErrorCollector(page, [
...GLOBAL_ALLOWLIST,
...expectedErrors,
]);
await use(collector);
if (process.env.E2E_STRICT_ERRORS === "1") {
collector.assertClean();
} else if (collector.errors.length > 0) {
// Soft mode: attach errors to the test report so they're visible
// without failing the run.
await testInfo.attach("collected-errors.txt", {
body: collector.errors
.map((e) => `[${e.kind}] ${e.message}${e.url ? ` (${e.url})` : ""}`)
.join("\n"),
contentType: "text/plain",
});
}
},
frigateApp: async ({ page, errorCollector }, use, testInfo) => {
// Reference the collector so its `use()` runs and teardown fires
void errorCollector;
const app = new FrigateApp(page, testInfo.project.name); const app = new FrigateApp(page, testInfo.project.name);
await app.installDefaults(); await app.installDefaults();
await use(app); await use(app);

View File

@ -82,14 +82,26 @@ export class ApiMocker {
route.fulfill({ json: stats }), route.fulfill({ json: stats }),
); );
// Reviews // Reviews. The real backend exposes /review (singular) for the main
await this.page.route("**/api/reviews**", (route) => { // list and /review/summary for the summary — the previous plural glob
const url = route.request().url(); // (**/api/reviews**) never matched either endpoint, so review-dependent
if (url.includes("summary")) { // tests silently ran without data. The POST mutations at /reviews/viewed
return route.fulfill({ json: reviewSummary }); // and /reviews/delete (plural) still fall through to the generic
} // mutation catch-all further down the file.
return route.fulfill({ json: reviews }); await this.page.route(/\/api\/review\/summary/, (route) =>
}); route.fulfill({ json: reviewSummary }),
);
await this.page.route(/\/api\/review(\?|$)/, (route) =>
route.fulfill({ json: reviews }),
);
// Export jobs. The Exports page polls this every 2s while any export
// is in_progress; without a mock route it falls through to the preview
// server which returns 500 and makes the page flap between loading and
// rendered state, breaking tests that navigate to /export.
await this.page.route("**/api/jobs/export", (route) =>
route.fulfill({ json: [] }),
);
// Recordings summary // Recordings summary
await this.page.route("**/api/recordings/summary**", (route) => await this.page.route("**/api/recordings/summary**", (route) =>

View File

@ -0,0 +1,56 @@
/**
* Per-test mock overrides for driving empty / loading / error states.
*
* Playwright route handlers are LIFO: the most recently registered handler
* matching a URL takes precedence. The frigateApp fixture installs default
* mocks before the test body runs, so these helpers called inside the
* test body register AFTER the defaults and therefore win.
*
* Always call these BEFORE the navigation that triggers the request.
*
* Example:
* await mockEmpty(page, "**\/api\/exports**");
* await frigateApp.goto("/export");
* // Page now renders the empty state
*/
import type { Page } from "@playwright/test";
/** Fulfill every request matching `urlPattern` with an empty JSON array. */
export async function mockEmpty(
  page: Page,
  urlPattern: string | RegExp,
): Promise<void> {
  await page.route(urlPattern, async (route) => {
    await route.fulfill({ json: [] });
  });
}
/** Fulfill the matched endpoint with an HTTP error response (default 500). */
export async function mockError(
  page: Page,
  urlPattern: string | RegExp,
  status = 500,
): Promise<void> {
  const errorBody = { success: false, message: "Mocked error" };
  await page.route(urlPattern, async (route) => {
    await route.fulfill({ status, json: errorBody });
  });
}
/**
 * Fulfill the matched endpoint with `body` only after waiting `ms`
 * milliseconds. Use to assert loading-state UI is visible during the
 * delay window.
 */
export async function mockDelay(
  page: Page,
  urlPattern: string | RegExp,
  ms: number,
  body: unknown = [],
): Promise<void> {
  const wait = () => new Promise<void>((resolve) => setTimeout(resolve, ms));
  await page.route(urlPattern, async (route) => {
    await wait();
    await route.fulfill({ json: body });
  });
}

View File

@ -79,4 +79,57 @@ export class BasePage {
async waitForPageLoad() { async waitForPageLoad() {
await this.page.waitForSelector("#pageRoot", { timeout: 10_000 }); await this.page.waitForSelector("#pageRoot", { timeout: 10_000 });
} }
/**
* Open the mobile-only export pane / sheet that slides up from the
* bottom on the export page. No-op on desktop. Returns the pane locator
* so the caller can assert against its contents.
*/
async openMobilePane(): Promise<Locator> {
if (this.isDesktop) {
// Return the desktop equivalent (the main content area itself)
return this.pageRoot;
}
// Look for any element that opens a sheet/dialog on tap.
// Specific views override this with their own selector.
const pane = this.page.locator('[role="dialog"]').first();
return pane;
}
/**
* Open a side drawer (e.g. mobile filter drawer). View-specific page
* objects should override this with their actual trigger selector.
* The default implementation looks for a button labelled "Open menu"
* or "Filters" and clicks it, then returns the drawer locator.
*/
async openDrawer(): Promise<Locator> {
if (this.isDesktop) {
return this.pageRoot;
}
const trigger = this.page
.getByRole("button", { name: /menu|filter/i })
.first();
if (await trigger.count()) {
await trigger.click();
}
return this.page.locator('[role="dialog"], [data-state="open"]').first();
}
/**
* Open a bottom sheet (vaul). View-specific page objects should
* override this with their actual trigger selector.
*/
async openBottomSheet(): Promise<Locator> {
if (this.isDesktop) {
return this.pageRoot;
}
return this.page.locator("[vaul-drawer]").first();
}
/** Close any currently-open mobile overlay (drawer, sheet, dialog). */
async closeMobileOverlay(): Promise<void> {
if (this.isDesktop) return;
// Press Escape — Radix dialogs and vaul both close on Escape
await this.page.keyboard.press("Escape");
}
} }

View File

@ -0,0 +1,160 @@
#!/usr/bin/env node
/**
* Lint script for e2e specs. Bans lenient test patterns and requires
* a @mobile-tagged test in every spec under specs/ (excluding _meta/).
*
* Banned patterns:
* - page.waitForTimeout( use expect().toPass() or waitFor instead
* - if (await ... .isVisible()) assertions must be unconditional
* - if ((await ... .count()) > 0) same as above
* - expect(... .length).toBeGreaterThan(0) on textContent results
*
* Escape hatch: append `// e2e-lint-allow` on any line to silence the
* check for that line. Use sparingly and explain why in a comment above.
*
* @mobile rule: every .spec.ts under specs/ (not specs/_meta/) must
* contain at least one test title or describe with the substring "@mobile".
*
* Specs in PENDING_REWRITE are exempt from all rules until they are
* rewritten with proper assertions and mobile coverage. Remove each
* entry when its spec is updated.
*/
import { readFileSync, readdirSync, statSync } from "node:fs";
import { basename, dirname, join, relative, resolve, sep } from "node:path";
import { fileURLToPath } from "node:url";
// ESM modules have no __dirname; derive it from the module URL.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Root of the e2e specs tree (sibling of this script's directory).
const SPECS_DIR = resolve(__dirname, "..", "specs");
// Specs under _meta are self-tests for the harness; the @mobile rule
// is not applied to them (see lintFile).
const META_PREFIX = resolve(SPECS_DIR, "_meta");

// Specs exempt from lint rules until they are rewritten with proper
// assertions and mobile coverage. Remove each entry when its spec is updated.
const PENDING_REWRITE = new Set([
  "auth.spec.ts",
  "chat.spec.ts",
  "classification.spec.ts",
  "config-editor.spec.ts",
  "explore.spec.ts",
  "export.spec.ts",
  "face-library.spec.ts",
  "live.spec.ts",
  "logs.spec.ts",
  "navigation.spec.ts",
  "replay.spec.ts",
  "review.spec.ts",
  "system.spec.ts",
]);

// Each entry: `name` appears in the report, `regex` is tested against
// every source line, `advice` tells the author what to use instead.
const BANNED_PATTERNS = [
  {
    name: "page.waitForTimeout",
    regex: /\bwaitForTimeout\s*\(/,
    advice:
      "Use expect.poll(), expect(...).toPass(), or waitFor() with a real condition.",
  },
  {
    name: "conditional isVisible() assertion",
    regex: /\bif\s*\(\s*await\s+[^)]*\.isVisible\s*\(/,
    advice:
      "Assertions must be unconditional. Use expect(...).toBeVisible() instead.",
  },
  {
    name: "conditional count() assertion",
    regex: /\bif\s*\(\s*\(?\s*await\s+[^)]*\.count\s*\(\s*\)\s*\)?\s*[><=!]/,
    advice:
      "Assertions must be unconditional. Use expect(...).toHaveCount(n).",
  },
  {
    name: "vacuous textContent length assertion",
    regex: /expect\([^)]*\.length\)\.toBeGreaterThan\(0\)/,
    advice:
      "Assert specific content, not that some text exists.",
  },
];
/**
 * Recursively collect every `.spec.ts` file under `dir`, depth-first in
 * directory-listing order. Returns absolute/joined paths.
 */
function walk(dir) {
  const specs = [];
  for (const name of readdirSync(dir)) {
    const fullPath = join(dir, name);
    if (statSync(fullPath).isDirectory()) {
      specs.push(...walk(fullPath));
    } else if (name.endsWith(".spec.ts")) {
      specs.push(fullPath);
    }
  }
  return specs;
}
/**
 * Lint one spec file. Returns an array of finding records
 * ({ file, line, col, rule, message, source }); empty when clean or
 * when the file is exempt.
 */
function lintFile(file) {
  // Fix: use basename()/sep instead of split("/") and a hard-coded
  // "/specs/settings/" substring so the exemption checks also work on
  // Windows, where walk() produces "\"-separated paths.
  if (PENDING_REWRITE.has(basename(file))) return [];
  // The settings suite is exempt from all rules.
  if (file.includes(`${sep}specs${sep}settings${sep}`)) return [];
  const errors = [];
  const text = readFileSync(file, "utf8");
  const lines = text.split("\n");
  for (let i = 0; i < lines.length; i++) {
    const line = lines[i];
    // Escape hatch: a trailing `// e2e-lint-allow` marker silences
    // every banned-pattern check for that single line.
    if (line.includes("e2e-lint-allow")) continue;
    for (const pat of BANNED_PATTERNS) {
      if (pat.regex.test(line)) {
        errors.push({
          file,
          line: i + 1,
          col: 1,
          rule: pat.name,
          message: `${pat.name}: ${pat.advice}`,
          source: line.trim(),
        });
      }
    }
  }
  // @mobile rule: every spec outside specs/_meta must contain at least
  // one "@mobile"-tagged test or describe title somewhere in the file.
  const isMeta = file.startsWith(META_PREFIX);
  if (!isMeta) {
    if (!/@mobile\b/.test(text)) {
      errors.push({
        file,
        line: 1,
        col: 1,
        rule: "missing @mobile test",
        message:
          'Spec must contain at least one test or describe tagged with "@mobile".',
        source: "",
      });
    }
  }
  return errors;
}
/**
 * Entry point: lint every spec under SPECS_DIR, print findings to
 * stderr, and exit 0 (clean) or 1 (errors found).
 */
function main() {
  const specFiles = walk(SPECS_DIR);
  const findings = specFiles.flatMap((specFile) => lintFile(specFile));
  if (findings.length === 0) {
    console.log(`e2e:lint: ${specFiles.length} spec files OK`);
    process.exit(0);
  }
  for (const finding of findings) {
    const rel = relative(process.cwd(), finding.file);
    console.error(`${rel}:${finding.line}:${finding.col} ${finding.rule}`);
    console.error(` ${finding.message}`);
    if (finding.source) console.error(` > ${finding.source}`);
  }
  console.error(
    `\ne2e:lint: ${findings.length} error${findings.length === 1 ? "" : "s"} in ${specFiles.length} files`,
  );
  process.exit(1);
}
main();

View File

@ -0,0 +1,112 @@
/**
* Self-tests for the error collector fixture itself.
*
* These guard against future regressions in the safety net. Each test
* deliberately triggers (or avoids triggering) an error to verify the
* collector behaves correctly. Tests that expect to fail use the
* `expectedErrors` fixture parameter to allowlist their own errors.
*/
import { test, expect } from "../../fixtures/frigate-test";
// test.use applies to a whole describe block in Playwright, so each test
// that needs a custom allowlist gets its own describe.
// Baseline: a clean navigation must not trip the collector.
test.describe("Error Collector — clean @meta", () => {
  test("clean page passes", async ({ frigateApp }) => {
    await frigateApp.goto("/");
    // No errors triggered. The fixture teardown should not throw.
  });
});

test.describe("Error Collector — unallowlisted console error fails @meta", () => {
  test("console.error fails the test when not allowlisted", async ({
    page,
    frigateApp,
  }) => {
    // Only meaningful in strict mode: without it the deliberate error
    // would not fail the test, and test.fail() would itself report a
    // failure for an unexpectedly-passing test.
    test.skip(
      process.env.E2E_STRICT_ERRORS !== "1",
      "Requires E2E_STRICT_ERRORS=1 to assert failure",
    );
    test.fail(); // We expect the fixture teardown to throw
    await frigateApp.goto("/");
    // Emit the error from inside the page so the collector's console
    // listener (not the test process) observes it.
    await page.evaluate(() => {
      // eslint-disable-next-line no-console
      console.error("UNEXPECTED_DELIBERATE_TEST_ERROR_xyz123");
    });
  });
});

test.describe("Error Collector — allowlisted console error passes @meta", () => {
  // expectedErrors allowlists this exact marker, so teardown must pass.
  test.use({ expectedErrors: [/ALLOWED_DELIBERATE_TEST_ERROR_xyz123/] });
  test("console.error is silenced when allowlisted via expectedErrors", async ({
    page,
    frigateApp,
  }) => {
    await frigateApp.goto("/");
    await page.evaluate(() => {
      // eslint-disable-next-line no-console
      console.error("ALLOWED_DELIBERATE_TEST_ERROR_xyz123");
    });
  });
});
test.describe("Error Collector — uncaught pageerror fails @meta", () => {
  test("uncaught pageerror fails the test", async ({ page, frigateApp }) => {
    test.skip(
      process.env.E2E_STRICT_ERRORS !== "1",
      "Requires E2E_STRICT_ERRORS=1 to assert failure",
    );
    test.fail(); // teardown should throw on the uncaught error below
    await frigateApp.goto("/");
    // Throw from a setTimeout so the error is uncaught in the page;
    // a throw directly inside evaluate() would instead reject the
    // evaluate call in the test process.
    await page.evaluate(() => {
      setTimeout(() => {
        throw new Error("UNCAUGHT_DELIBERATE_TEST_ERROR_xyz789");
      }, 0);
    });
    // Wait a frame to let the throw propagate before fixture teardown.
    // The marker below silences the e2e:lint banned-pattern check on this line.
    await page.waitForTimeout(100); // e2e-lint-allow: deliberate; need to await async throw
  });
});

test.describe("Error Collector — 5xx fails @meta", () => {
  test("same-origin 5xx response fails the test", async ({
    page,
    frigateApp,
  }) => {
    test.skip(
      process.env.E2E_STRICT_ERRORS !== "1",
      "Requires E2E_STRICT_ERRORS=1 to assert failure",
    );
    test.fail(); // teardown should throw because of the 500 below
    await page.route("**/api/version", (route) =>
      route.fulfill({ status: 500, body: "boom" }),
    );
    await frigateApp.goto("/");
    // Swallow the fetch rejection — only the collector should complain.
    await page.evaluate(() => fetch("/api/version").catch(() => {}));
    // Give the response listener a microtask to fire
    await expect.poll(async () => true).toBe(true);
  });
});
test.describe("Error Collector — allowlisted 5xx passes @meta", () => {
  // Use a single alternation regex so test.use() receives a 1-element array.
  // Playwright's isFixtureTuple() treats any [value, object] pair as a fixture
  // tuple, so a 2-element array whose second item is a RegExp would be
  // misinterpreted as [defaultValue, options]. Both the request collector
  // error ("500 … /api/version") and the browser console error
  // ("Failed to load resource … 500") are matched by the alternation below.
  test.use({
    expectedErrors: [/500.*\/api\/version|Failed to load resource.*500/],
  });
  test("allowlisted 5xx passes", async ({ page, frigateApp }) => {
    // Same 500 setup as the failing variant above, but allowlisted —
    // the fixture teardown must NOT throw here.
    await page.route("**/api/version", (route) =>
      route.fulfill({ status: 500, body: "boom" }),
    );
    await frigateApp.goto("/");
    await page.evaluate(() => fetch("/api/version").catch(() => {}));
  });
});

View File

@ -0,0 +1,73 @@
/**
* Self-tests for the mock override helpers. Verifies each helper
* intercepts the matched URL and returns the expected payload/status.
*/
import { test, expect } from "../../fixtures/frigate-test";
import { mockEmpty, mockError, mockDelay } from "../../helpers/mock-overrides";
test.describe("Mock Overrides — empty @meta", () => {
  test("mockEmpty returns []", async ({ page, frigateApp }) => {
    // Register the override before navigation so the route is active
    // by the time the page issues the fetch.
    await mockEmpty(page, "**/api/__meta_test__");
    await frigateApp.goto("/");
    // Fetch from inside the page so the request goes through Playwright
    // routing; return a JSON-serializable summary for the assertions.
    const result = await page.evaluate(async () => {
      const r = await fetch("/api/__meta_test__");
      return { status: r.status, body: await r.json() };
    });
    expect(result.status).toBe(200);
    expect(result.body).toEqual([]);
  });
});

test.describe("Mock Overrides — error default @meta", () => {
  // Match both the collected request error and the browser's console echo.
  // Using a single alternation regex avoids Playwright's isFixtureTuple
  // collision with multi-element RegExp arrays.
  test.use({
    expectedErrors: [/500.*__meta_test__|Failed to load resource.*500/],
  });
  test("mockError returns 500 by default", async ({ page, frigateApp }) => {
    // No explicit status argument — exercises the 500 default.
    await mockError(page, "**/api/__meta_test__");
    await frigateApp.goto("/");
    const status = await page.evaluate(async () => {
      const r = await fetch("/api/__meta_test__");
      return r.status;
    });
    expect(status).toBe(500);
  });
});
test.describe("Mock Overrides — error custom status @meta", () => {
  // The browser emits a "Failed to load resource" console.error for 404s,
  // which the error collector catches even though 404 is not a 5xx.
  test.use({
    expectedErrors: [/Failed to load resource.*404|404.*__meta_test_404__/],
  });
  test("mockError accepts a custom status", async ({ page, frigateApp }) => {
    await mockError(page, "**/api/__meta_test_404__", 404);
    await frigateApp.goto("/");
    const status = await page.evaluate(async () => {
      const r = await fetch("/api/__meta_test_404__");
      return r.status;
    });
    expect(status).toBe(404);
  });
});

test.describe("Mock Overrides — delay @meta", () => {
  test("mockDelay delays response by the requested ms", async ({
    page,
    frigateApp,
  }) => {
    await mockDelay(page, "**/api/__meta_test_delay__", 300, ["delayed"]);
    await frigateApp.goto("/");
    // Time the fetch inside the page so routing overhead is included
    // in the measurement.
    const elapsed = await page.evaluate(async () => {
      const start = performance.now();
      await fetch("/api/__meta_test_delay__");
      return performance.now() - start;
    });
    // Threshold is 250 for a 300ms delay — presumably slack for timer
    // resolution and scheduling jitter; confirm before tightening.
    expect(elapsed).toBeGreaterThanOrEqual(250);
  });
});

View File

@ -1,74 +1,734 @@
/**
* Export page tests -- HIGH tier.
*
* Tests export card rendering with mock data, search filtering,
* and delete confirmation dialog.
*/
import { test, expect } from "../fixtures/frigate-test"; import { test, expect } from "../fixtures/frigate-test";
test.describe("Export Page - Cards @high", () => { test.describe("Export Page - Overview @high", () => {
test("export page renders export cards from mock data", async ({ test("renders uncategorized exports and case cards from mock data", async ({
frigateApp, frigateApp,
}) => { }) => {
await frigateApp.goto("/export"); await frigateApp.goto("/export");
await frigateApp.page.waitForTimeout(2000);
// Should show export names from our mock data
await expect( await expect(
frigateApp.page.getByText("Front Door - Person Alert"), frigateApp.page.getByText("Front Door - Person Alert"),
).toBeVisible({ timeout: 10_000 }); ).toBeVisible();
await expect( await expect(
frigateApp.page.getByText("Backyard - Car Detection"), frigateApp.page.getByText("Garage - In Progress"),
).toBeVisible();
await expect(
frigateApp.page.getByText("Package Theft Investigation"),
).toBeVisible(); ).toBeVisible();
}); });
test("export page shows in-progress indicator", async ({ frigateApp }) => { test("search filters uncategorized exports", async ({ frigateApp }) => {
await frigateApp.goto("/export"); await frigateApp.goto("/export");
await frigateApp.page.waitForTimeout(2000);
// "Garage - In Progress" export should be visible const searchInput = frigateApp.page.getByPlaceholder(/search/i).first();
await expect(frigateApp.page.getByText("Garage - In Progress")).toBeVisible( await searchInput.fill("Front Door");
{ timeout: 10_000 },
); await expect(
frigateApp.page.getByText("Front Door - Person Alert"),
).toBeVisible();
await expect(
frigateApp.page.getByText("Backyard - Car Detection"),
).toBeHidden();
await expect(
frigateApp.page.getByText("Garage - In Progress"),
).toBeHidden();
}); });
test("export page shows case grouping", async ({ frigateApp }) => { test("new case button opens the create case dialog", async ({
frigateApp,
}) => {
await frigateApp.goto("/export"); await frigateApp.goto("/export");
await frigateApp.page.waitForTimeout(3000);
// Cases may render differently depending on API response shape await frigateApp.page.getByRole("button", { name: "New Case" }).click();
const pageText = await frigateApp.page.textContent("#pageRoot");
expect(pageText?.length).toBeGreaterThan(0); await expect(
frigateApp.page.getByRole("dialog").filter({ hasText: "Create Case" }),
).toBeVisible();
await expect(frigateApp.page.getByPlaceholder("Case name")).toBeVisible();
}); });
}); });
test.describe("Export Page - Search @high", () => { test.describe("Export Page - Case Detail @high", () => {
test("search input filters export list", async ({ frigateApp }) => { test("opening a case shows its detail view and associated export", async ({
frigateApp,
}) => {
await frigateApp.goto("/export"); await frigateApp.goto("/export");
await frigateApp.page.waitForTimeout(2000);
const searchInput = frigateApp.page.locator( await frigateApp.page
'#pageRoot input[type="text"], #pageRoot input', .getByText("Package Theft Investigation")
.first()
.click();
await expect(
frigateApp.page.getByRole("heading", {
name: "Package Theft Investigation",
}),
).toBeVisible();
await expect(
frigateApp.page.getByText("Backyard - Car Detection"),
).toBeVisible();
await expect(
frigateApp.page.getByRole("button", { name: "Add Export" }),
).toBeVisible();
await expect(
frigateApp.page.getByRole("button", { name: "Edit Case" }),
).toBeVisible();
await expect(
frigateApp.page.getByRole("button", { name: "Delete Case" }),
).toBeVisible();
});
test("edit case opens a prefilled dialog", async ({ frigateApp }) => {
await frigateApp.goto("/export");
await frigateApp.page
.getByText("Package Theft Investigation")
.first()
.click();
await frigateApp.page.getByRole("button", { name: "Edit Case" }).click();
const dialog = frigateApp.page
.getByRole("dialog")
.filter({ hasText: "Edit Case" });
await expect(dialog).toBeVisible();
await expect(dialog.locator("input")).toHaveValue(
"Package Theft Investigation",
); );
if ( await expect(dialog.locator("textarea")).toHaveValue(
(await searchInput.count()) > 0 && "Review of suspicious activity near the front porch",
(await searchInput.first().isVisible()) );
) { });
// Type a search term that matches one export
await searchInput.first().fill("Front Door"); test("add export shows completed uncategorized exports for assignment", async ({
await frigateApp.page.waitForTimeout(500); frigateApp,
// "Front Door - Person Alert" should still be visible }) => {
await expect( await frigateApp.goto("/export");
frigateApp.page.getByText("Front Door - Person Alert"),
).toBeVisible(); await frigateApp.page
.getByText("Package Theft Investigation")
.first()
.click();
await frigateApp.page.getByRole("button", { name: "Add Export" }).click();
const dialog = frigateApp.page
.getByRole("dialog")
.filter({ hasText: "Add Export to Package Theft Investigation" });
await expect(dialog).toBeVisible();
// Completed, uncategorized exports are selectable
await expect(dialog.getByText("Front Door - Person Alert")).toBeVisible();
// In-progress exports are intentionally hidden by AssignExportDialog
// (see Exports.tsx filteredExports) — they can't be assigned until
// they finish, so they should not show in the picker.
await expect(dialog.getByText("Garage - In Progress")).toBeHidden();
});
test("delete case opens a confirmation dialog", async ({ frigateApp }) => {
await frigateApp.goto("/export");
await frigateApp.page
.getByText("Package Theft Investigation")
.first()
.click();
await frigateApp.page.getByRole("button", { name: "Delete Case" }).click();
const dialog = frigateApp.page
.getByRole("alertdialog")
.filter({ hasText: "Delete Case" });
await expect(dialog).toBeVisible();
await expect(dialog.getByText(/Package Theft Investigation/)).toBeVisible();
});
test("delete case can also delete its exports", async ({ frigateApp }) => {
let deleteRequestUrl: string | null = null;
let deleteCaseCompleted = false;
const initialCases = [
{
id: "case-001",
name: "Package Theft Investigation",
description: "Review of suspicious activity near the front porch",
created_at: 1775407931.3863528,
updated_at: 1775483531.3863528,
},
];
const initialExports = [
{
id: "export-001",
camera: "front_door",
name: "Front Door - Person Alert",
date: 1775490731.3863528,
video_path: "/exports/export-001.mp4",
thumb_path: "/exports/export-001-thumb.jpg",
in_progress: false,
export_case_id: null,
},
{
id: "export-002",
camera: "backyard",
name: "Backyard - Car Detection",
date: 1775483531.3863528,
video_path: "/exports/export-002.mp4",
thumb_path: "/exports/export-002-thumb.jpg",
in_progress: false,
export_case_id: "case-001",
},
{
id: "export-003",
camera: "garage",
name: "Garage - In Progress",
date: 1775492531.3863528,
video_path: "/exports/export-003.mp4",
thumb_path: "/exports/export-003-thumb.jpg",
in_progress: true,
export_case_id: null,
},
];
await frigateApp.page.route(/\/api\/cases(?:$|\?|\/)/, async (route) => {
const request = route.request();
if (request.method() === "DELETE") {
deleteRequestUrl = request.url();
deleteCaseCompleted = true;
return route.fulfill({ json: { success: true } });
}
if (request.method() === "GET") {
return route.fulfill({
json: deleteCaseCompleted ? [] : initialCases,
});
}
return route.fallback();
});
await frigateApp.page.route("**/api/exports**", async (route) => {
if (route.request().method() !== "GET") {
return route.fallback();
}
return route.fulfill({
json: deleteCaseCompleted
? initialExports.filter((exp) => exp.export_case_id !== "case-001")
: initialExports,
});
});
await frigateApp.goto("/export");
await frigateApp.page
.getByText("Package Theft Investigation")
.first()
.click();
await frigateApp.page.getByRole("button", { name: "Delete Case" }).click();
const dialog = frigateApp.page
.getByRole("alertdialog")
.filter({ hasText: "Delete Case" });
await expect(dialog).toBeVisible();
const deleteExportsSwitch = dialog.getByRole("switch", {
name: "Also delete exports",
});
await expect(deleteExportsSwitch).toHaveAttribute("aria-checked", "false");
await expect(
dialog.getByText(
"Exports will remain available as uncategorized exports.",
),
).toBeVisible();
await deleteExportsSwitch.click();
await expect(deleteExportsSwitch).toHaveAttribute("aria-checked", "true");
await expect(
dialog.getByText("All exports in this case will be permanently deleted."),
).toBeVisible();
await dialog.getByRole("button", { name: /^delete$/i }).click();
await expect
.poll(() => deleteRequestUrl)
.toContain("/api/cases/case-001?delete_exports=true");
await expect(dialog).toBeHidden();
await expect(
frigateApp.page.getByRole("heading", {
name: "Package Theft Investigation",
}),
).toBeHidden();
await expect(
frigateApp.page.getByText("Backyard - Car Detection"),
).toBeHidden();
await expect(
frigateApp.page.getByText("Front Door - Person Alert"),
).toBeVisible();
});
});
test.describe("Export Page - Empty State @high", () => {
  test("renders the empty state when there are no exports or cases", async ({
    frigateApp,
  }) => {
    // Override the fixture's default mocks before navigation; route
    // handlers are LIFO, so these later registrations win. A trailing
    // `**` glob also matches the empty string, so "**/api/export**"
    // covers /api/export AND /api/exports (with or without a query),
    // and "**/api/cases**" covers the bare /api/cases — separate
    // registrations for each variant are redundant.
    await frigateApp.page.route("**/api/export**", (route) =>
      route.fulfill({ json: [] }),
    );
    await frigateApp.page.route("**/api/cases**", (route) =>
      route.fulfill({ json: [] }),
    );

    await frigateApp.goto("/export");

    await expect(frigateApp.page.getByText("No exports found")).toBeVisible();
  });
});
test.describe("Export Page - Mobile @high @mobile", () => {
  test("mobile can open an export preview dialog", async ({ frigateApp }) => {
    // This flow is asserted only on the mobile layout; desktop skips.
    test.skip(!frigateApp.isMobile, "Mobile-only assertion");
    await frigateApp.goto("/export");
    // Tap the export card by its visible name to open the preview.
    await frigateApp.page
      .getByText("Front Door - Person Alert")
      .first()
      .click();
    // Scope the dialog lookup by its text so other open dialogs do not
    // trip strict-mode matching.
    const dialog = frigateApp.page
      .getByRole("dialog")
      .filter({ hasText: "Front Door - Person Alert" });
    await expect(dialog).toBeVisible();
    await expect(dialog.locator("video")).toBeVisible();
  });
});
test.describe("Multi-Review Export @high", () => {
// Two alert reviews close enough to "now" to fall within the
// default last-24-hours review window. Using numeric timestamps
// because the TS ReviewSegment type expects numbers even though
// the backend pydantic model serializes datetime as ISO strings —
// the app reads these as numbers for display math.
const now = Date.now() / 1000;
const mockReviews = [
{
id: "mex-review-001",
camera: "front_door",
start_time: now - 600,
end_time: now - 580,
has_been_reviewed: false,
severity: "alert",
thumb_path: "/clips/front_door/mex-review-001-thumb.jpg",
data: {
audio: [],
detections: ["person-001"],
objects: ["person"],
sub_labels: [],
significant_motion_areas: [],
zones: ["front_yard"],
},
},
{
id: "mex-review-002",
camera: "backyard",
start_time: now - 1200,
end_time: now - 1170,
has_been_reviewed: false,
severity: "alert",
thumb_path: "/clips/backyard/mex-review-002-thumb.jpg",
data: {
audio: [],
detections: ["car-002"],
objects: ["car"],
sub_labels: [],
significant_motion_areas: [],
zones: ["driveway"],
},
},
];
// 51 alert reviews, all front_door, spaced 5 minutes apart. Used by the
// over-limit test to trigger Ctrl+A select-all and verify the Export
// button is hidden at 51 selected.
const oversizedReviews = Array.from({ length: 51 }, (_, i) => ({
id: `mex-oversized-${i.toString().padStart(3, "0")}`,
camera: "front_door",
start_time: now - 60 * 60 - i * 300,
end_time: now - 60 * 60 - i * 300 + 20,
has_been_reviewed: false,
severity: "alert",
thumb_path: `/clips/front_door/mex-oversized-${i}-thumb.jpg`,
data: {
audio: [],
detections: [`person-${i}`],
objects: ["person"],
sub_labels: [],
significant_motion_areas: [],
zones: ["front_yard"],
},
}));
const mockSummary = {
last24Hours: {
reviewed_alert: 0,
reviewed_detection: 0,
total_alert: 2,
total_detection: 0,
},
};
async function routeReviews(
page: import("@playwright/test").Page,
reviews: unknown[],
) {
// Intercept the actual `/api/review` endpoint (singular — the
// default api-mocker only registers `/api/reviews**` (plural)
// which does not match the real request URL).
await page.route(/\/api\/review(\?|$)/, (route) =>
route.fulfill({ json: reviews }),
);
await page.route(/\/api\/review\/summary/, (route) =>
route.fulfill({ json: mockSummary }),
);
}
test.beforeEach(async ({ frigateApp }) => {
await routeReviews(frigateApp.page, mockReviews);
// Empty cases list by default so the dialog defaults to "new case".
// Individual tests override this to populate existing cases.
await frigateApp.page.route("**/api/cases", (route) =>
route.fulfill({ json: [] }),
);
});
async function selectTwoReviews(frigateApp: {
page: import("@playwright/test").Page;
}) {
// Every review card has className `review-item` on its wrapper
// (see EventView.tsx). Cards also have data-start attributes that
// we can key off if needed.
const reviewItems = frigateApp.page.locator(".review-item");
await reviewItems.first().waitFor({ state: "visible", timeout: 10_000 });
// Meta-click the first two items to enter multi-select mode.
// PreviewThumbnailPlayer reads e.metaKey to decide multi-select.
await reviewItems.nth(0).click({ modifiers: ["Meta"] });
await reviewItems.nth(1).click();
}
test("selecting two reviews reveals the export button", async ({
frigateApp,
}) => {
test.skip(frigateApp.isMobile, "Desktop multi-select flow");
await frigateApp.goto("/review");
await selectTwoReviews(frigateApp);
// Action group replaces the filter bar once items are selected
await expect(frigateApp.page.getByText(/2.*selected/i)).toBeVisible({
timeout: 5_000,
});
const exportButton = frigateApp.page.getByRole("button", {
name: /export/i,
});
await expect(exportButton).toBeVisible();
});
test("clicking export opens the multi-review dialog with correct title", async ({
frigateApp,
}) => {
test.skip(frigateApp.isMobile, "Desktop multi-select flow");
await frigateApp.goto("/review");
await selectTwoReviews(frigateApp);
await frigateApp.page
.getByRole("button", { name: /export/i })
.first()
.click();
const dialog = frigateApp.page
.getByRole("dialog")
.filter({ hasText: /Export 2 reviews/i });
await expect(dialog).toBeVisible({ timeout: 5_000 });
// The dialog uses a Select trigger for case selection (admins). The
// default "None" value is shown on the trigger.
await expect(dialog.locator("button[role='combobox']")).toBeVisible();
await expect(dialog.getByText(/None/)).toBeVisible();
});
test("starting an export posts the expected payload and navigates to the case", async ({
frigateApp,
}) => {
test.skip(frigateApp.isMobile, "Desktop multi-select flow");
let capturedPayload: unknown = null;
await frigateApp.page.route("**/api/exports/batch", async (route) => {
capturedPayload = route.request().postDataJSON();
await route.fulfill({
status: 202,
json: {
export_case_id: "new-case-xyz",
export_ids: ["front_door_a", "backyard_b"],
results: [
{
camera: "front_door",
export_id: "front_door_a",
success: true,
status: "queued",
error: null,
item_index: 0,
},
{
camera: "backyard",
export_id: "backyard_b",
success: true,
status: "queued",
error: null,
item_index: 1,
},
],
},
});
});
await frigateApp.goto("/review");
await selectTwoReviews(frigateApp);
await frigateApp.page
.getByRole("button", { name: /export/i })
.first()
.click();
const dialog = frigateApp.page
.getByRole("dialog")
.filter({ hasText: /Export 2 reviews/i });
await expect(dialog).toBeVisible({ timeout: 5_000 });
// Select "Create new case" from the case dropdown (default is "None")
await dialog.locator("button[role='combobox']").click();
await frigateApp.page
.getByRole("option", { name: /Create new case/i })
.click();
const nameInput = dialog.locator("input").first();
await nameInput.fill("E2E Incident");
await dialog.getByRole("button", { name: /export 2 reviews/i }).click();
// Wait for the POST to fire
await expect.poll(() => capturedPayload, { timeout: 5_000 }).not.toBeNull();
const payload = capturedPayload as {
items: Array<{
camera: string;
start_time: number;
end_time: number;
image_path?: string;
client_item_id?: string;
}>;
new_case_name?: string;
export_case_id?: string;
};
expect(payload.items).toHaveLength(2);
expect(payload.new_case_name).toBe("E2E Incident");
// When creating a new case, we must NOT also send export_case_id —
// the two fields are mutually exclusive on the backend.
expect(payload.export_case_id).toBeUndefined();
expect(payload.items.map((i) => i.camera).sort()).toEqual([
"backyard",
"front_door",
]);
// Each item must preserve REVIEW_PADDING (4s) on the edges —
// i.e. the padded window is 8s longer than the original review.
// The mock reviews above have 20s and 30s raw durations, so the
// expected padded durations are 28s and 38s.
const paddedDurations = payload.items
.map((i) => i.end_time - i.start_time)
.sort((a, b) => a - b);
expect(paddedDurations).toEqual([28, 38]);
// Thumbnails should be passed through per item
for (const item of payload.items) {
expect(item.image_path).toMatch(/mex-review-\d+-thumb\.jpg$/);
} }
await expect(frigateApp.page.locator("#pageRoot")).toBeVisible(); expect(payload.items.map((item) => item.client_item_id)).toEqual([
}); "mex-review-001",
}); "mex-review-002",
]);
test.describe("Export Page - Controls @high", () => { await expect(frigateApp.page).toHaveURL(/caseId=new-case-xyz/, {
test("export page filter controls are present", async ({ frigateApp }) => { timeout: 5_000,
await frigateApp.goto("/export"); });
await frigateApp.page.waitForTimeout(1000); });
const buttons = frigateApp.page.locator("#pageRoot button");
const count = await buttons.count(); test("mobile opens a drawer (not a dialog) for the multi-review export flow", async ({
expect(count).toBeGreaterThan(0); frigateApp,
}) => {
test.skip(!frigateApp.isMobile, "Mobile-only Drawer assertion");
await frigateApp.goto("/review");
await selectTwoReviews(frigateApp);
await frigateApp.page
.getByRole("button", { name: /export/i })
.first()
.click();
// On mobile the component renders a shadcn Drawer, which uses
// role="dialog" but sets data-vaul-drawer. Desktop renders a
// shadcn Dialog with role="dialog" but no data-vaul-drawer.
// The title and submit button both contain "Export 2 reviews", so
// assert each element distinctly: the title is a heading and the
// submit button has role="button".
const drawer = frigateApp.page.locator("[data-vaul-drawer]");
await expect(drawer).toBeVisible({ timeout: 5_000 });
await expect(
drawer.getByRole("heading", { name: /Export 2 reviews/i }),
).toBeVisible();
await expect(
drawer.getByRole("button", { name: /export 2 reviews/i }),
).toBeVisible();
});
test("hides export button when more than 50 reviews are selected", async ({
frigateApp,
}) => {
test.skip(frigateApp.isMobile, "Desktop select-all keyboard flow");
// Override the default 2-review mock with 51 reviews before
// navigation. Playwright matches routes last-registered-first so
// this takes precedence over the beforeEach.
await routeReviews(frigateApp.page, oversizedReviews);
await frigateApp.goto("/review");
// Wait for any review item to render before firing the shortcut
await frigateApp.page
.locator(".review-item")
.first()
.waitFor({ state: "visible", timeout: 10_000 });
// Ctrl+A triggers onSelectAllReviews (see EventView.tsx useKeyboardListener)
await frigateApp.page.keyboard.press("Control+a");
// The action group should show "51 selected" but no Export button.
// Mark-as-reviewed is still there so the action bar is rendered.
// Scope the "Mark as reviewed" lookup to its exact aria-label because
// the page can render other "mark as reviewed" controls elsewhere
// (e.g. on individual cards) that would trip strict-mode matching.
await expect(frigateApp.page.getByText(/51.*selected/i)).toBeVisible({
timeout: 5_000,
});
await expect(
frigateApp.page.getByRole("button", { name: "Mark as reviewed" }),
).toBeVisible();
await expect(
frigateApp.page.getByRole("button", { name: /^export$/i }),
).toHaveCount(0);
});
test("attaching to an existing case sends export_case_id without new_case_name", async ({
frigateApp,
}) => {
test.skip(frigateApp.isMobile, "Desktop multi-select flow");
// Seed one existing case so the dialog can offer the "existing" branch.
// The fixture mocks the user as admin (adminProfile()), so useIsAdmin()
// is true and the dialog renders the "Existing case" radio.
await frigateApp.page.route("**/api/cases", (route) =>
route.fulfill({
json: [
{
id: "existing-case-abc",
name: "Incident #42",
description: "",
created_at: now - 3600,
updated_at: now - 3600,
},
],
}),
);
let capturedPayload: unknown = null;
await frigateApp.page.route("**/api/exports/batch", async (route) => {
capturedPayload = route.request().postDataJSON();
await route.fulfill({
status: 202,
json: {
export_case_id: "existing-case-abc",
export_ids: ["front_door_a", "backyard_b"],
results: [
{
camera: "front_door",
export_id: "front_door_a",
success: true,
status: "queued",
error: null,
item_index: 0,
},
{
camera: "backyard",
export_id: "backyard_b",
success: true,
status: "queued",
error: null,
item_index: 1,
},
],
},
});
});
await frigateApp.goto("/review");
await selectTwoReviews(frigateApp);
await frigateApp.page
.getByRole("button", { name: /export/i })
.first()
.click();
const dialog = frigateApp.page
.getByRole("dialog")
.filter({ hasText: /Export 2 reviews/i });
await expect(dialog).toBeVisible({ timeout: 5_000 });
// Open the Case Select dropdown and pick the seeded case directly.
// The dialog now uses a single Select listing existing cases above
// the "Create new case" option — no radio toggle needed.
const selectTrigger = dialog.locator("button[role='combobox']").first();
await selectTrigger.waitFor({ state: "visible", timeout: 5_000 });
await selectTrigger.click();
// The dropdown portal renders outside the dialog
await frigateApp.page.getByRole("option", { name: /Incident #42/ }).click();
await dialog.getByRole("button", { name: /export 2 reviews/i }).click();
await expect.poll(() => capturedPayload, { timeout: 5_000 }).not.toBeNull();
const payload = capturedPayload as {
items: unknown[];
new_case_name?: string;
new_case_description?: string;
export_case_id?: string;
};
expect(payload.export_case_id).toBe("existing-case-abc");
expect(payload.new_case_name).toBeUndefined();
expect(payload.new_case_description).toBeUndefined();
expect(payload.items).toHaveLength(2);
// Navigate should hit /export. useSearchEffect consumes the caseId
// query param and strips it once the case is found in the cases list,
// so we assert on the path, not the query string.
await expect(frigateApp.page).toHaveURL(/\/export(\?|$)/, {
timeout: 5_000,
});
}); });
}); });

View File

@ -7,7 +7,8 @@
"dev": "vite --host", "dev": "vite --host",
"postinstall": "patch-package", "postinstall": "patch-package",
"build": "tsc && vite build --base=/BASE_PATH/", "build": "tsc && vite build --base=/BASE_PATH/",
"lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore .", "lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore . && npm run e2e:lint",
"e2e:lint": "node e2e/scripts/lint-specs.mjs",
"lint:fix": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore --fix .", "lint:fix": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore --fix .",
"preview": "vite preview", "preview": "vite preview",
"prettier:write": "prettier -u -w --ignore-path .gitignore \"*.{ts,tsx,js,jsx,css,html}\"", "prettier:write": "prettier -u -w --ignore-path .gitignore \"*.{ts,tsx,js,jsx,css,html}\"",

View File

@ -50,24 +50,77 @@
"placeholder": "Name the Export" "placeholder": "Name the Export"
}, },
"case": { "case": {
"newCaseOption": "Create new case",
"newCaseNamePlaceholder": "New case name",
"newCaseDescriptionPlaceholder": "Case description",
"label": "Case", "label": "Case",
"nonAdminHelp": "A new case will be created for these exports.",
"placeholder": "Select a case" "placeholder": "Select a case"
}, },
"select": "Select", "select": "Select",
"export": "Export", "export": "Export",
"queueing": "Queueing Export...",
"selectOrExport": "Select or Export", "selectOrExport": "Select or Export",
"tabs": {
"export": "Single Camera",
"multiCamera": "Multi-Camera"
},
"multiCamera": {
"timeRange": "Time range",
"selectFromTimeline": "Select from Timeline",
"cameraSelection": "Cameras",
"cameraSelectionHelp": "Cameras with tracked objects in this time range are pre-selected",
"checkingActivity": "Checking camera activity...",
"noCameras": "No cameras available",
"detectionCount_one": "1 tracked object",
"detectionCount_other": "{{count}} tracked objects",
"nameLabel": "Export name",
"namePlaceholder": "Optional base name for these exports",
"queueingButton": "Queueing Exports...",
"exportButton_one": "Export 1 Camera",
"exportButton_other": "Export {{count}} Cameras"
},
"multi": {
"title_one": "Export 1 review",
"title_other": "Export {{count}} reviews",
"description": "Export each selected review. All exports will be grouped under a single case.",
"descriptionNoCase": "Export each selected review.",
"caseNamePlaceholder": "Review export - {{date}}",
"exportButton_one": "Export 1 review",
"exportButton_other": "Export {{count}} reviews",
"exportingButton": "Exporting...",
"toast": {
"started_one": "Started 1 export. Opening the case now.",
"started_other": "Started {{count}} exports. Opening the case now.",
"startedNoCase_one": "Started 1 export.",
"startedNoCase_other": "Started {{count}} exports.",
"partial": "Started {{successful}} of {{total}} exports. Failed: {{failedItems}}",
"failed": "Failed to start {{total}} exports. Failed: {{failedItems}}"
}
},
"toast": { "toast": {
"success": "Successfully started export. View the file in the exports page.", "success": "Successfully started export. View the file in the exports page.",
"queued": "Export queued. View progress in the exports page.",
"view": "View", "view": "View",
"batchSuccess_one": "Started 1 export. Opening the case now.",
"batchSuccess_other": "Started {{count}} exports. Opening the case now.",
"batchPartial": "Started {{successful}} of {{total}} exports. Failed cameras: {{failedCameras}}",
"batchFailed": "Failed to start {{total}} exports. Failed cameras: {{failedCameras}}",
"batchQueuedSuccess_one": "Queued 1 export. Opening the case now.",
"batchQueuedSuccess_other": "Queued {{count}} exports. Opening the case now.",
"batchQueuedPartial": "Queued {{successful}} of {{total}} exports. Failed cameras: {{failedCameras}}",
"batchQueueFailed": "Failed to queue {{total}} exports. Failed cameras: {{failedCameras}}",
"error": { "error": {
"failed": "Failed to start export: {{error}}", "failed": "Failed to queue export: {{error}}",
"endTimeMustAfterStartTime": "End time must be after start time", "endTimeMustAfterStartTime": "End time must be after start time",
"noVaildTimeSelected": "No valid time range selected" "noVaildTimeSelected": "No valid time range selected"
} }
}, },
"fromTimeline": { "fromTimeline": {
"saveExport": "Save Export", "saveExport": "Save Export",
"previewExport": "Preview Export" "queueingExport": "Queueing Export...",
"previewExport": "Preview Export",
"useThisRange": "Use This Range"
} }
}, },
"streaming": { "streaming": {

View File

@ -12,6 +12,23 @@
"result": "Result", "result": "Result",
"arguments": "Arguments:", "arguments": "Arguments:",
"response": "Response:", "response": "Response:",
"attachment_chip_label": "{{label}} on {{camera}}",
"attachment_chip_remove": "Remove attachment",
"open_in_explore": "Open in Explore",
"attach_event_aria": "Attach event {{eventId}}",
"attachment_picker_paste_label": "Or paste event ID",
"attachment_picker_attach": "Attach",
"attachment_picker_placeholder": "Attach an event",
"quick_reply_find_similar": "Find similar sightings",
"quick_reply_tell_me_more": "Tell me more about this",
"quick_reply_when_else": "When else was it seen?",
"quick_reply_find_similar_text": "Find similar sightings to this.",
"quick_reply_tell_me_more_text": "Tell me more about this one.",
"quick_reply_when_else_text": "When else was this seen?",
"anchor": "Reference",
"similarity_score": "Similarity",
"no_similar_objects_found": "No similar objects found.",
"semantic_search_required": "Semantic search must be enabled to find similar objects.",
"send": "Send", "send": "Send",
"suggested_requests": "Try asking:", "suggested_requests": "Try asking:",
"starting_requests": { "starting_requests": {

View File

@ -20,14 +20,30 @@
"downloadVideo": "Download video", "downloadVideo": "Download video",
"editName": "Edit name", "editName": "Edit name",
"deleteExport": "Delete export", "deleteExport": "Delete export",
"assignToCase": "Add to case" "assignToCase": "Add to case",
"removeFromCase": "Remove from case"
},
"toolbar": {
"newCase": "New Case",
"addExport": "Add Export",
"editCase": "Edit Case",
"deleteCase": "Delete Case"
}, },
"toast": { "toast": {
"error": { "error": {
"renameExportFailed": "Failed to rename export: {{errorMessage}}", "renameExportFailed": "Failed to rename export: {{errorMessage}}",
"assignCaseFailed": "Failed to update case assignment: {{errorMessage}}" "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}",
"caseSaveFailed": "Failed to save case: {{errorMessage}}",
"caseDeleteFailed": "Failed to delete case: {{errorMessage}}"
} }
}, },
"deleteCase": {
"label": "Delete Case",
"desc": "Are you sure you want to delete {{caseName}}?",
"descKeepExports": "Exports will remain available as uncategorized exports.",
"descDeleteExports": "All exports in this case will be permanently deleted.",
"deleteExports": "Also delete exports"
},
"caseDialog": { "caseDialog": {
"title": "Add to case", "title": "Add to case",
"description": "Choose an existing case or create a new one.", "description": "Choose an existing case or create a new one.",
@ -35,5 +51,73 @@
"newCaseOption": "Create new case", "newCaseOption": "Create new case",
"nameLabel": "Case name", "nameLabel": "Case name",
"descriptionLabel": "Description" "descriptionLabel": "Description"
},
"caseCard": {
"emptyCase": "No exports yet"
},
"jobCard": {
"defaultName": "{{camera}} export",
"queued": "Queued",
"running": "Running"
},
"caseView": {
"noDescription": "No description",
"createdAt": "Created {{value}}",
"exportCount_one": "1 export",
"exportCount_other": "{{count}} exports",
"cameraCount_one": "1 camera",
"cameraCount_other": "{{count}} cameras",
"showMore": "Show more",
"showLess": "Show less",
"emptyTitle": "This case is empty",
"emptyDescription": "Add existing uncategorized exports to keep the case organized.",
"emptyDescriptionNoExports": "There are no uncategorized exports available to add yet."
},
"caseEditor": {
"createTitle": "Create Case",
"editTitle": "Edit Case",
"namePlaceholder": "Case name",
"descriptionPlaceholder": "Add notes or context for this case"
},
"addExportDialog": {
"title": "Add Export to {{caseName}}",
"searchPlaceholder": "Search uncategorized exports",
"empty": "No uncategorized exports match this search.",
"addButton_one": "Add 1 Export",
"addButton_other": "Add {{count}} Exports",
"adding": "Adding..."
},
"selected_one": "{{count}} selected",
"selected_other": "{{count}} selected",
"bulkActions": {
"addToCase": "Add to Case",
"moveToCase": "Move to Case",
"removeFromCase": "Remove from Case",
"delete": "Delete",
"deleteNow": "Delete Now"
},
"bulkDelete": {
"title": "Delete Exports",
"desc_one": "Are you sure you want to delete {{count}} export?",
"desc_other": "Are you sure you want to delete {{count}} exports?"
},
"bulkRemoveFromCase": {
"title": "Remove from Case",
"desc_one": "Remove {{count}} export from this case?",
"desc_other": "Remove {{count}} exports from this case?",
"descKeepExports": "Exports will be moved to uncategorized.",
"descDeleteExports": "Exports will be permanently deleted.",
"deleteExports": "Delete exports instead"
},
"bulkToast": {
"success": {
"delete": "Successfully deleted exports",
"reassign": "Successfully updated case assignment",
"remove": "Successfully removed exports from case"
},
"error": {
"deleteFailed": "Failed to delete exports: {{errorMessage}}",
"reassignFailed": "Failed to update case assignment: {{errorMessage}}"
}
} }
} }

View File

@ -7,6 +7,7 @@ import useStats, { useAutoFrigateStats } from "@/hooks/use-stats";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import type { ProfilesApiResponse } from "@/types/profile"; import type { ProfilesApiResponse } from "@/types/profile";
import { getProfileColor } from "@/utils/profileColors"; import { getProfileColor } from "@/utils/profileColors";
import { useIsAdmin } from "@/hooks/use-is-admin";
import { useContext, useEffect, useMemo } from "react"; import { useContext, useEffect, useMemo } from "react";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import useSWR from "swr"; import useSWR from "swr";
@ -18,6 +19,7 @@ import { Link } from "react-router-dom";
export default function Statusbar() { export default function Statusbar() {
const { t } = useTranslation(["views/system"]); const { t } = useTranslation(["views/system"]);
const isAdmin = useIsAdmin();
const { messages, addMessage, clearMessages } = useContext( const { messages, addMessage, clearMessages } = useContext(
StatusBarMessagesContext, StatusBarMessagesContext,
@ -154,9 +156,23 @@ export default function Statusbar() {
</Link> </Link>
); );
})} })}
{activeProfile && ( {activeProfile &&
<Link to="/settings?page=profiles"> (isAdmin ? (
<div className="flex cursor-pointer items-center gap-2 text-sm hover:underline"> <Link to="/settings?page=profiles">
<div className="flex cursor-pointer items-center gap-2 text-sm hover:underline">
<span
className={cn(
"size-2 shrink-0 rounded-full",
activeProfile.color.dot,
)}
/>
<span className="max-w-[150px] truncate">
{activeProfile.friendlyName}
</span>
</div>
</Link>
) : (
<div className="flex items-center gap-2 text-sm">
<span <span
className={cn( className={cn(
"size-2 shrink-0 rounded-full", "size-2 shrink-0 rounded-full",
@ -167,8 +183,7 @@ export default function Statusbar() {
{activeProfile.friendlyName} {activeProfile.friendlyName}
</span> </span>
</div> </div>
</Link> ))}
)}
</div> </div>
<div className="no-scrollbar flex h-full max-w-[50%] items-center gap-2 overflow-x-auto"> <div className="no-scrollbar flex h-full max-w-[50%] items-center gap-2 overflow-x-auto">
{Object.entries(messages).length === 0 ? ( {Object.entries(messages).length === 0 ? (

View File

@ -1,6 +1,6 @@
import ActivityIndicator from "../indicators/activity-indicator"; import ActivityIndicator from "../indicators/activity-indicator";
import { Button } from "../ui/button"; import { Button } from "../ui/button";
import { useCallback, useMemo, useState } from "react"; import { useCallback, useMemo, useRef, useState } from "react";
import { isMobile } from "react-device-detect"; import { isMobile } from "react-device-detect";
import { FiMoreVertical } from "react-icons/fi"; import { FiMoreVertical } from "react-icons/fi";
import { Skeleton } from "../ui/skeleton"; import { Skeleton } from "../ui/skeleton";
@ -13,7 +13,7 @@ import {
} from "../ui/dialog"; } from "../ui/dialog";
import { Input } from "../ui/input"; import { Input } from "../ui/input";
import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { DeleteClipType, Export, ExportCase } from "@/types/export"; import { DeleteClipType, Export, ExportCase, ExportJob } from "@/types/export";
import { baseUrl } from "@/api/baseUrl"; import { baseUrl } from "@/api/baseUrl";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { shareOrCopy } from "@/utils/browserUtil"; import { shareOrCopy } from "@/utils/browserUtil";
@ -27,7 +27,10 @@ import {
DropdownMenuItem, DropdownMenuItem,
DropdownMenuTrigger, DropdownMenuTrigger,
} from "../ui/dropdown-menu"; } from "../ui/dropdown-menu";
import { FaFolder } from "react-icons/fa"; import { FaFolder, FaVideo } from "react-icons/fa";
import { HiSquare2Stack } from "react-icons/hi2";
import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name";
import useContextMenu from "@/hooks/use-contextmenu";
type CaseCardProps = { type CaseCardProps = {
className: string; className: string;
@ -41,10 +44,15 @@ export function CaseCard({
exports, exports,
onSelect, onSelect,
}: CaseCardProps) { }: CaseCardProps) {
const { t } = useTranslation(["views/exports"]);
const firstExport = useMemo( const firstExport = useMemo(
() => exports.find((exp) => exp.thumb_path && exp.thumb_path.length > 0), () => exports.find((exp) => exp.thumb_path && exp.thumb_path.length > 0),
[exports], [exports],
); );
const cameraCount = useMemo(
() => new Set(exports.map((exp) => exp.camera)).size,
[exports],
);
return ( return (
<div <div
@ -61,10 +69,30 @@ export function CaseCard({
alt="" alt=""
/> />
)} )}
{!firstExport && (
<div className="absolute inset-0 bg-gradient-to-br from-secondary via-secondary/80 to-muted" />
)}
<div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-16 bg-gradient-to-t from-black/60 to-transparent" /> <div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-16 bg-gradient-to-t from-black/60 to-transparent" />
<div className="absolute bottom-2 left-2 z-20 flex items-center justify-start gap-2 text-white"> <div className="absolute right-1 top-1 z-40 flex items-center gap-2 rounded-lg bg-black/50 px-2 py-1 text-xs text-white">
<FaFolder /> <div className="flex items-center gap-1">
<div className="capitalize">{exportCase.name}</div> <HiSquare2Stack className="size-3" />
<div>{exports.length}</div>
</div>
<div className="flex items-center gap-1">
<FaVideo className="size-3" />
<div>{cameraCount}</div>
</div>
</div>
<div className="absolute inset-x-2 bottom-2 z-20 text-white">
<div className="flex items-center justify-start gap-2">
<FaFolder />
<div className="truncate smart-capitalize">{exportCase.name}</div>
</div>
{exports.length === 0 && (
<div className="mt-1 text-xs text-white/80">
{t("caseCard.emptyCase")}
</div>
)}
</div> </div>
</div> </div>
); );
@ -73,18 +101,26 @@ export function CaseCard({
type ExportCardProps = { type ExportCardProps = {
className: string; className: string;
exportedRecording: Export; exportedRecording: Export;
isSelected?: boolean;
selectionMode?: boolean;
onSelect: (selected: Export) => void; onSelect: (selected: Export) => void;
onContextSelect?: (selected: Export) => void;
onRename: (original: string, update: string) => void; onRename: (original: string, update: string) => void;
onDelete: ({ file, exportName }: DeleteClipType) => void; onDelete: ({ file, exportName }: DeleteClipType) => void;
onAssignToCase?: (selected: Export) => void; onAssignToCase?: (selected: Export) => void;
onRemoveFromCase?: (selected: Export) => void;
}; };
export function ExportCard({ export function ExportCard({
className, className,
exportedRecording, exportedRecording,
isSelected,
selectionMode,
onSelect, onSelect,
onContextSelect,
onRename, onRename,
onDelete, onDelete,
onAssignToCase, onAssignToCase,
onRemoveFromCase,
}: ExportCardProps) { }: ExportCardProps) {
const { t } = useTranslation(["views/exports"]); const { t } = useTranslation(["views/exports"]);
const isAdmin = useIsAdmin(); const isAdmin = useIsAdmin();
@ -92,6 +128,15 @@ export function ExportCard({
exportedRecording.thumb_path.length > 0, exportedRecording.thumb_path.length > 0,
); );
// selection
const cardRef = useRef<HTMLDivElement | null>(null);
useContextMenu(cardRef, () => {
if (!exportedRecording.in_progress && onContextSelect) {
onContextSelect(exportedRecording);
}
});
// editing name // editing name
const [editName, setEditName] = useState<{ const [editName, setEditName] = useState<{
@ -180,13 +225,18 @@ export function ExportCard({
</Dialog> </Dialog>
<div <div
ref={cardRef}
className={cn( className={cn(
"relative flex aspect-video cursor-pointer items-center justify-center rounded-lg bg-black md:rounded-2xl", "relative flex aspect-video cursor-pointer items-center justify-center rounded-lg bg-black md:rounded-2xl",
className, className,
)} )}
onClick={() => { onClick={(e) => {
if (!exportedRecording.in_progress) { if (!exportedRecording.in_progress) {
onSelect(exportedRecording); if ((selectionMode || e.ctrlKey || e.metaKey) && onContextSelect) {
onContextSelect(exportedRecording);
} else {
onSelect(exportedRecording);
}
} }
}} }}
> >
@ -205,7 +255,7 @@ export function ExportCard({
)} )}
</> </>
)} )}
{!exportedRecording.in_progress && ( {!exportedRecording.in_progress && !selectionMode && (
<div className="absolute bottom-2 right-3 z-40"> <div className="absolute bottom-2 right-3 z-40">
<DropdownMenu modal={false}> <DropdownMenu modal={false}>
<DropdownMenuTrigger> <DropdownMenuTrigger>
@ -254,6 +304,18 @@ export function ExportCard({
{t("tooltip.assignToCase")} {t("tooltip.assignToCase")}
</DropdownMenuItem> </DropdownMenuItem>
)} )}
{isAdmin && onRemoveFromCase && (
<DropdownMenuItem
className="cursor-pointer"
aria-label={t("tooltip.removeFromCase")}
onClick={(e) => {
e.stopPropagation();
onRemoveFromCase(exportedRecording);
}}
>
{t("tooltip.removeFromCase")}
</DropdownMenuItem>
)}
{isAdmin && ( {isAdmin && (
<DropdownMenuItem <DropdownMenuItem
className="cursor-pointer" className="cursor-pointer"
@ -292,10 +354,61 @@ export function ExportCard({
<Skeleton className="absolute inset-0 aspect-video rounded-lg md:rounded-2xl" /> <Skeleton className="absolute inset-0 aspect-video rounded-lg md:rounded-2xl" />
)} )}
<ImageShadowOverlay /> <ImageShadowOverlay />
<div className="absolute bottom-2 left-3 flex items-end text-white smart-capitalize"> <div
{exportedRecording.name.replaceAll("_", " ")} className={cn(
"pointer-events-none absolute inset-0 z-10 size-full rounded-lg outline outline-[3px] -outline-offset-[2.8px] md:rounded-2xl",
isSelected
? "shadow-selected outline-selected"
: "outline-transparent duration-500",
)}
/>
<div className="absolute bottom-2 left-3 right-12 z-30 text-white">
<div className="truncate smart-capitalize">
{exportedRecording.name.replaceAll("_", " ")}
</div>
</div> </div>
</div> </div>
</> </>
); );
} }
type ActiveExportJobCardProps = {
className?: string;
job: ExportJob;
};
export function ActiveExportJobCard({
className = "",
job,
}: ActiveExportJobCardProps) {
const { t } = useTranslation(["views/exports", "common"]);
const cameraName = useCameraFriendlyName(job.camera);
const displayName = useMemo(() => {
if (job.name && job.name.length > 0) {
return job.name.replaceAll("_", " ");
}
return t("jobCard.defaultName", {
camera: cameraName,
});
}, [cameraName, job.name, t]);
const statusLabel =
job.status === "queued" ? t("jobCard.queued") : t("jobCard.running");
return (
<div
className={cn(
"relative flex aspect-video items-center justify-center overflow-hidden rounded-lg border border-dashed border-border bg-secondary/40 md:rounded-2xl",
className,
)}
>
<div className="absolute right-3 top-3 z-30 rounded-full bg-selected/90 px-2 py-1 text-xs text-selected-foreground">
{statusLabel}
</div>
<div className="flex flex-col items-center gap-3 px-6 text-center">
<ActivityIndicator />
<div className="text-sm font-medium text-primary">{displayName}</div>
</div>
</div>
);
}

View File

@ -81,7 +81,7 @@ export default function ReviewCard({
axios axios
.post( .post(
`export/${event.camera}/start/${event.start_time + REVIEW_PADDING}/end/${endTime}`, `export/${event.camera}/start/${event.start_time - REVIEW_PADDING}/end/${endTime}`,
{ playback: "realtime" }, { playback: "realtime" },
) )
.then((response) => { .then((response) => {

View File

@ -0,0 +1,111 @@
import { useApiHost } from "@/api";
import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name";
import { useTranslation } from "react-i18next";
import useSWR from "swr";
import { LuX, LuExternalLink } from "react-icons/lu";
import { Button } from "@/components/ui/button";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { cn } from "@/lib/utils";
import { getTranslatedLabel } from "@/utils/i18n";
type ChatAttachmentChipProps = {
eventId: string;
mode: "composer" | "bubble";
onRemove?: () => void;
};
/**
* Small horizontal chip rendering an event as an "attachment": a thumbnail,
* a friendly label like "Person on driveway", an optional remove X (composer
* mode), and an external-link icon that opens the event in Explore.
*/
export function ChatAttachmentChip({
eventId,
mode,
onRemove,
}: ChatAttachmentChipProps) {
const apiHost = useApiHost();
const { t } = useTranslation(["views/chat"]);
const { data: eventData } = useSWR<{ label: string; camera: string }[]>(
`event_ids?ids=${eventId}`,
);
const evt = eventData?.[0];
const cameraName = useCameraFriendlyName(evt?.camera);
const displayLabel = evt
? t("attachment_chip_label", {
label: getTranslatedLabel(evt.label),
camera: cameraName,
})
: eventId;
return (
<div
className={cn(
"inline-flex max-w-full items-center gap-2 rounded-lg border border-border bg-background/80 p-1.5 pr-2",
mode === "bubble" && "border-primary-foreground/30 bg-transparent",
)}
>
<div className="relative size-10 shrink-0 overflow-hidden rounded-md">
<img
className="size-full object-cover"
src={`${apiHost}api/events/${eventId}/thumbnail.webp`}
alt=""
loading="lazy"
onError={(e) => {
(e.currentTarget as HTMLImageElement).style.visibility = "hidden";
}}
/>
</div>
{evt ? (
<span
className={cn(
"truncate text-xs",
mode === "bubble"
? "text-primary-foreground/90"
: "text-foreground",
)}
>
{displayLabel}
</span>
) : (
<ActivityIndicator className="size-4" />
)}
<Tooltip>
<TooltipTrigger asChild>
<a
href={`/explore?event_id=${eventId}`}
target="_blank"
rel="noopener noreferrer"
className={cn(
"flex size-6 shrink-0 items-center justify-center rounded text-muted-foreground hover:text-foreground",
mode === "bubble" &&
"text-primary-foreground/70 hover:text-primary-foreground",
)}
onClick={(e) => e.stopPropagation()}
aria-label={t("open_in_explore")}
>
<LuExternalLink className="size-3.5" />
</a>
</TooltipTrigger>
<TooltipContent>{t("open_in_explore")}</TooltipContent>
</Tooltip>
{mode === "composer" && onRemove && (
<Button
variant="ghost"
size="icon"
className="size-6 shrink-0 text-muted-foreground hover:text-foreground"
onClick={onRemove}
aria-label={t("attachment_chip_remove")}
>
<LuX className="size-3.5" />
</Button>
)}
</div>
);
}

View File

@ -1,42 +1,97 @@
import { useApiHost } from "@/api"; import { useApiHost } from "@/api";
import { useTranslation } from "react-i18next";
import { LuExternalLink } from "react-icons/lu";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
type ChatEvent = { id: string; score?: number };
type ChatEventThumbnailsRowProps = { type ChatEventThumbnailsRowProps = {
events: { id: string }[]; events: ChatEvent[];
anchor?: { id: string } | null;
onAttach?: (eventId: string) => void;
}; };
/** /**
* Horizontal scroll row of event thumbnail images for chat (e.g. after search_objects). * Horizontal scroll row of event thumbnail images for chat.
* Renders nothing when events is empty. * Optionally renders an anchor thumbnail with a "reference" badge above the
* results, and per-event similarity scores when provided.
* Clicking a thumbnail calls onAttach; a small external-link overlay opens
* the event in Explore.
* Renders nothing when there is nothing to show.
*/ */
export function ChatEventThumbnailsRow({ export function ChatEventThumbnailsRow({
events, events,
anchor = null,
onAttach,
}: ChatEventThumbnailsRowProps) { }: ChatEventThumbnailsRowProps) {
const apiHost = useApiHost(); const apiHost = useApiHost();
const { t } = useTranslation(["views/chat"]);
if (events.length === 0) return null; if (events.length === 0 && !anchor) return null;
const renderThumb = (event: ChatEvent, isAnchor = false) => (
<div
key={event.id}
className={cn(
"relative aspect-square size-32 shrink-0 overflow-hidden rounded-lg",
isAnchor && "ring-2 ring-primary",
)}
>
<button
type="button"
className="block size-full"
onClick={() => onAttach?.(event.id)}
aria-label={t("attach_event_aria", { eventId: event.id })}
>
<img
className="size-full object-cover"
src={`${apiHost}api/events/${event.id}/thumbnail.webp`}
alt=""
loading="lazy"
/>
</button>
<Tooltip>
<TooltipTrigger asChild>
<a
href={`/explore?event_id=${event.id}`}
target="_blank"
rel="noopener noreferrer"
onClick={(e) => e.stopPropagation()}
className="absolute right-1 top-1 flex size-6 items-center justify-center rounded bg-black/60 text-white hover:bg-black/80"
aria-label={t("open_in_explore")}
>
<LuExternalLink className="size-3" />
</a>
</TooltipTrigger>
<TooltipContent>{t("open_in_explore")}</TooltipContent>
</Tooltip>
{isAnchor && (
<span className="pointer-events-none absolute left-1 top-1 rounded bg-primary px-1 text-[10px] text-primary-foreground">
{t("anchor")}
</span>
)}
</div>
);
return ( return (
<div className="flex min-w-0 max-w-full flex-col gap-1 self-start"> <div className="flex min-w-0 max-w-full flex-col gap-2 self-start">
<div className="scrollbar-container min-w-0 overflow-x-auto"> {anchor && (
<div className="flex w-max gap-2"> <div className="scrollbar-container min-w-0 overflow-x-auto">
{events.map((event) => ( <div className="flex w-max gap-2">{renderThumb(anchor, true)}</div>
<a
key={event.id}
href={`/explore?event_id=${event.id}`}
target="_blank"
rel="noopener noreferrer"
className="relative aspect-square size-32 shrink-0 overflow-hidden rounded-lg"
>
<img
className="size-full object-cover"
src={`${apiHost}api/events/${event.id}/thumbnail.webp`}
alt=""
loading="lazy"
/>
</a>
))}
</div> </div>
</div> )}
{events.length > 0 && (
<div className="scrollbar-container min-w-0 overflow-x-auto">
<div className="flex w-max gap-2">
{events.map((event) => renderThumb(event))}
</div>
</div>
)}
</div> </div>
); );
} }

View File

@ -15,6 +15,8 @@ import {
TooltipTrigger, TooltipTrigger,
} from "@/components/ui/tooltip"; } from "@/components/ui/tooltip";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { ChatAttachmentChip } from "@/components/chat/ChatAttachmentChip";
import { parseAttachedEvent } from "@/utils/chatUtil";
type MessageBubbleProps = { type MessageBubbleProps = {
role: "user" | "assistant"; role: "user" | "assistant";
@ -126,6 +128,10 @@ export function MessageBubble({
); );
} }
const { eventId: attachedEventId, body: displayContent } = isUser
? parseAttachedEvent(content)
: { eventId: null, body: content };
return ( return (
<div <div
className={cn( className={cn(
@ -140,9 +146,20 @@ export function MessageBubble({
)} )}
> >
{isUser ? ( {isUser ? (
content <div className="flex flex-col gap-2">
{attachedEventId && (
<ChatAttachmentChip eventId={attachedEventId} mode="bubble" />
)}
<div className="whitespace-pre-wrap">{displayContent}</div>
</div>
) : ( ) : (
<> <div
className={cn(
"[&>*:last-child]:inline",
!isComplete &&
"after:ml-0.5 after:inline-block after:h-4 after:w-2 after:animate-cursor-blink after:rounded-sm after:bg-foreground after:align-middle after:content-['']",
)}
>
<ReactMarkdown <ReactMarkdown
remarkPlugins={[remarkGfm]} remarkPlugins={[remarkGfm]}
components={{ components={{
@ -168,10 +185,7 @@ export function MessageBubble({
> >
{content} {content}
</ReactMarkdown> </ReactMarkdown>
{!isComplete && ( </div>
<span className="ml-1 inline-block h-4 w-0.5 animate-pulse bg-foreground align-middle" />
)}
</>
)} )}
</div> </div>
<div className="flex items-center gap-0.5"> <div className="flex items-center gap-0.5">

View File

@ -0,0 +1,114 @@
import { useState } from "react";
import { useTranslation } from "react-i18next";
import { LuPaperclip } from "react-icons/lu";
import { useApiHost } from "@/api";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
const EVENT_ID_RE = /^[A-Za-z0-9._-]+$/;
type ChatPaperclipButtonProps = {
recentEventIds: string[];
onAttach: (eventId: string) => void;
disabled?: boolean;
};
/**
* Paperclip button with a popover for picking an event to attach.
* Shows a grid of recent thumbnails (from the latest assistant message) and a
* "paste event ID" fallback input.
*/
/**
 * Paperclip button with a popover for picking an event to attach.
 * Shows a grid of recent thumbnails (from the latest assistant message) and a
 * "paste event ID" fallback input.
 */
export function ChatPaperclipButton({
  recentEventIds,
  onAttach,
  disabled = false,
}: ChatPaperclipButtonProps) {
  const apiHost = useApiHost();
  const { t } = useTranslation(["views/chat"]);

  // Popover visibility and the manually pasted event id.
  const [pickerOpen, setPickerOpen] = useState(false);
  const [manualId, setManualId] = useState("");

  // A pasted id is usable once trimmed, non-empty, and matching the pattern.
  const trimmedId = manualId.trim();
  const manualIdValid = trimmedId.length > 0 && EVENT_ID_RE.test(trimmedId);

  // Shared tail for both attach paths: notify the parent, close the popover,
  // and reset the paste field for next time.
  const finishAttach = (eventId: string) => {
    onAttach(eventId);
    setPickerOpen(false);
    setManualId("");
  };

  const submitManualId = () => {
    if (!manualIdValid) return;
    finishAttach(trimmedId);
  };

  // Enter submits the pasted id instead of bubbling to any enclosing form.
  const onManualIdKeyDown = (e: React.KeyboardEvent<HTMLInputElement>) => {
    if (e.key === "Enter") {
      e.preventDefault();
      submitManualId();
    }
  };

  return (
    <Popover open={pickerOpen} onOpenChange={setPickerOpen}>
      <PopoverTrigger asChild>
        <Button
          variant="ghost"
          size="icon"
          className="size-10 shrink-0 rounded-full"
          disabled={disabled}
          aria-label={t("attachment_picker_placeholder")}
        >
          <LuPaperclip className="size-4" />
        </Button>
      </PopoverTrigger>
      <PopoverContent className="w-72" align="start">
        <div className="flex flex-col gap-3">
          {recentEventIds.length > 0 && (
            <div className="grid grid-cols-4 gap-2">
              {/* cap the grid at 8 thumbnails to keep the popover compact */}
              {recentEventIds.slice(0, 8).map((id) => (
                <button
                  key={id}
                  type="button"
                  onClick={() => finishAttach(id)}
                  className="relative aspect-square overflow-hidden rounded-md ring-offset-background hover:ring-2 hover:ring-primary"
                  aria-label={t("attach_event_aria", { eventId: id })}
                >
                  <img
                    className="size-full object-cover"
                    src={`${apiHost}api/events/${id}/thumbnail.webp`}
                    alt=""
                    loading="lazy"
                  />
                </button>
              ))}
            </div>
          )}
          <div className="flex items-center gap-2">
            <Input
              placeholder={t("attachment_picker_paste_label")}
              value={manualId}
              onChange={(e) => setManualId(e.target.value)}
              onKeyDown={onManualIdKeyDown}
              className="h-8 text-xs"
            />
            <Button
              size="sm"
              variant="select"
              className="h-8"
              disabled={!manualIdValid}
              onClick={submitManualId}
            >
              {t("attachment_picker_attach")}
            </Button>
          </div>
        </div>
      </PopoverContent>
    </Popover>
  );
}

View File

@ -0,0 +1,49 @@
import { useTranslation } from "react-i18next";
import { Button } from "@/components/ui/button";
type QuickReply = { labelKey: string; textKey: string };
const REPLIES: QuickReply[] = [
{
labelKey: "quick_reply_find_similar",
textKey: "quick_reply_find_similar_text",
},
{
labelKey: "quick_reply_tell_me_more",
textKey: "quick_reply_tell_me_more_text",
},
{ labelKey: "quick_reply_when_else", textKey: "quick_reply_when_else_text" },
];
type ChatQuickRepliesProps = {
onSend: (text: string) => void;
disabled?: boolean;
};
/**
* Row of pill buttons shown in the composer while an attachment is pending.
* Clicking a pill immediately calls onSend with the canned text.
*/
export function ChatQuickReplies({
onSend,
disabled = false,
}: ChatQuickRepliesProps) {
const { t } = useTranslation(["views/chat"]);
return (
<div className="flex w-full flex-wrap gap-2">
{REPLIES.map((reply) => (
<Button
key={reply.labelKey}
variant="outline"
size="sm"
className="h-7 rounded-full px-3 text-xs"
disabled={disabled}
onClick={() => onSend(t(reply.textKey))}
>
{t(reply.labelKey)}
</Button>
))}
</div>
);
}

View File

@ -56,6 +56,11 @@ const record: SectionConfigOverrides = {
}, },
camera: { camera: {
restartRequired: [], restartRequired: [],
hiddenFields: [
"enabled_in_config",
"sync_recordings",
"export.max_concurrent",
],
}, },
}; };

View File

@ -89,6 +89,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
const { t } = useTranslation(["components/camera"]); const { t } = useTranslation(["components/camera"]);
const { data: config } = useSWR<FrigateConfig>("config"); const { data: config } = useSWR<FrigateConfig>("config");
const allowedCameras = useAllowedCameras(); const allowedCameras = useAllowedCameras();
const hasFullCameraAccess = useHasFullCameraAccess();
const isAdmin = useIsAdmin(); const isAdmin = useIsAdmin();
// tooltip // tooltip
@ -125,7 +126,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
const allGroups = Object.entries(config.camera_groups); const allGroups = Object.entries(config.camera_groups);
// If custom role, filter out groups where user has no accessible cameras // If custom role, filter out groups where user has no accessible cameras
if (!isAdmin) { if (!hasFullCameraAccess) {
return allGroups return allGroups
.filter(([, groupConfig]) => { .filter(([, groupConfig]) => {
// Check if user has access to at least one camera in this group // Check if user has access to at least one camera in this group
@ -137,7 +138,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
} }
return allGroups.sort((a, b) => a[1].order - b[1].order); return allGroups.sort((a, b) => a[1].order - b[1].order);
}, [config, allowedCameras, isAdmin]); }, [config, allowedCameras, hasFullCameraAccess]);
// add group // add group

View File

@ -0,0 +1,384 @@
import { useCallback, useMemo, useState } from "react";
import axios from "axios";
import { Button, buttonVariants } from "../ui/button";
import { isDesktop } from "react-device-detect";
import { HiTrash } from "react-icons/hi";
import { LuFolderPlus, LuFolderX } from "react-icons/lu";
import { Export, ExportCase } from "@/types/export";
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
} from "../ui/alert-dialog";
import { Label } from "../ui/label";
import { Switch } from "../ui/switch";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { useTranslation } from "react-i18next";
import { toast } from "sonner";
import { useIsAdmin } from "@/hooks/use-is-admin";
import OptionAndInputDialog from "../overlay/dialog/OptionAndInputDialog";
type ExportActionGroupProps = {
selectedExports: Export[];
setSelectedExports: (exports: Export[]) => void;
context: "uncategorized" | "case";
cases?: ExportCase[];
currentCaseId?: string;
mutate: () => void;
};
/**
 * Bulk-action bar for selected exports.
 *
 * Renders the "N selected | Unselect" summary plus (admin-only) buttons to
 * assign the selection to a case, remove it from the current case, or delete
 * it outright. Holding Shift while clicking Delete bypasses the confirmation
 * dialog ("Delete Now").
 *
 * Props:
 * - selectedExports: the currently checked exports the actions operate on
 * - setSelectedExports: setter used to clear the selection after an action
 * - context: "uncategorized" (general list) or "case" (inside a case view);
 *   "case" additionally enables the remove-from-case action
 * - cases: known cases used to build the case-picker options
 * - currentCaseId: case being viewed; excluded from the picker options
 * - mutate: revalidation callback invoked after any successful mutation
 */
export default function ExportActionGroup({
  selectedExports,
  setSelectedExports,
  context,
  cases,
  currentCaseId,
  mutate,
}: ExportActionGroupProps) {
  const { t } = useTranslation(["views/exports", "common"]);
  const isAdmin = useIsAdmin();

  const onClearSelected = useCallback(() => {
    setSelectedExports([]);
  }, [setSelectedExports]);

  // ── Delete ──────────────────────────────────────────────────────

  // Permanently delete every selected export, then clear the selection and
  // revalidate. Errors surface the server-provided message when available.
  const onDelete = useCallback(() => {
    const ids = selectedExports.map((e) => e.id);
    axios
      .post("exports/delete", { ids })
      .then((resp) => {
        if (resp.status === 200) {
          toast.success(t("bulkToast.success.delete"), {
            position: "top-center",
          });
          setSelectedExports([]);
          mutate();
        }
      })
      .catch((error) => {
        const errorMessage =
          error.response?.data?.message ||
          error.response?.data?.detail ||
          "Unknown error";
        toast.error(t("bulkToast.error.deleteFailed", { errorMessage }), {
          position: "top-center",
        });
      });
  }, [selectedExports, setSelectedExports, mutate, t]);

  const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
  // While Shift is held, delete skips the confirmation dialog entirely.
  const [bypassDialog, setBypassDialog] = useState(false);

  useKeyboardListener(["Shift"], (_, modifiers) => {
    setBypassDialog(modifiers.shift);
    return false;
  });

  const handleDelete = useCallback(() => {
    if (bypassDialog) {
      onDelete();
    } else {
      setDeleteDialogOpen(true);
    }
  }, [bypassDialog, onDelete]);

  // ── Remove from case ────────────────────────────────────────────

  const [removeDialogOpen, setRemoveDialogOpen] = useState(false);
  // When true, removal deletes the exports instead of just unassigning them.
  const [deleteExportsOnRemove, setDeleteExportsOnRemove] = useState(false);

  // Either delete the exports outright or reassign them to no case, depending
  // on the dialog's switch. On failure the dialog stays open for a retry.
  const handleRemoveFromCase = useCallback(() => {
    const ids = selectedExports.map((e) => e.id);
    const request = deleteExportsOnRemove
      ? axios.post("exports/delete", { ids })
      : axios.post("exports/reassign", { ids, export_case_id: null });

    request
      .then((resp) => {
        if (resp.status === 200) {
          toast.success(t("bulkToast.success.remove"), {
            position: "top-center",
          });
          setSelectedExports([]);
          mutate();
          setRemoveDialogOpen(false);
          setDeleteExportsOnRemove(false);
        }
      })
      .catch((error) => {
        const errorMessage =
          error.response?.data?.message ||
          error.response?.data?.detail ||
          "Unknown error";
        toast.error(t("bulkToast.error.reassignFailed", { errorMessage }), {
          position: "top-center",
        });
      });
  }, [selectedExports, deleteExportsOnRemove, setSelectedExports, mutate, t]);

  // ── Case picker ─────────────────────────────────────────────────

  const [casePickerOpen, setCasePickerOpen] = useState(false);

  // Existing cases (minus the one being viewed) sorted by name, with a
  // trailing "create new case" sentinel option.
  const caseOptions = useMemo(
    () => [
      ...(cases ?? [])
        .filter((c) => c.id !== currentCaseId)
        .map((c) => ({
          value: c.id,
          label: c.name,
        }))
        .sort((a, b) => a.label.localeCompare(b.label)),
      {
        value: "new",
        label: t("caseDialog.newCaseOption"),
      },
    ],
    [cases, currentCaseId, t],
  );

  // Reassign the selection to an existing case. Rethrows on failure so the
  // picker dialog can stay open.
  const handleAssignToCase = useCallback(
    async (caseId: string) => {
      const ids = selectedExports.map((e) => e.id);
      try {
        await axios.post("exports/reassign", {
          ids,
          export_case_id: caseId,
        });
        toast.success(t("bulkToast.success.reassign"), {
          position: "top-center",
        });
        setSelectedExports([]);
        mutate();
      } catch (error) {
        const apiError = error as {
          response?: { data?: { message?: string; detail?: string } };
        };
        const errorMessage =
          apiError.response?.data?.message ||
          apiError.response?.data?.detail ||
          "Unknown error";
        toast.error(t("bulkToast.error.reassignFailed", { errorMessage }), {
          position: "top-center",
        });
        throw error;
      }
    },
    [selectedExports, setSelectedExports, mutate, t],
  );

  // Create a new case, then reassign the selection into it. If case creation
  // returns no id, the reassign step is skipped. Rethrows on failure.
  const handleCreateNewCase = useCallback(
    async (name: string, description: string) => {
      const ids = selectedExports.map((e) => e.id);
      try {
        const createResp = await axios.post("cases", { name, description });
        const newCaseId: string | undefined = createResp.data?.id;

        if (newCaseId) {
          await axios.post("exports/reassign", {
            ids,
            export_case_id: newCaseId,
          });
        }

        toast.success(t("bulkToast.success.reassign"), {
          position: "top-center",
        });
        setSelectedExports([]);
        mutate();
      } catch (error) {
        const apiError = error as {
          response?: { data?: { message?: string; detail?: string } };
        };
        const errorMessage =
          apiError.response?.data?.message ||
          apiError.response?.data?.detail ||
          "Unknown error";
        toast.error(t("bulkToast.error.reassignFailed", { errorMessage }), {
          position: "top-center",
        });
        throw error;
      }
    },
    [selectedExports, setSelectedExports, mutate, t],
  );

  return (
    <>
      {/* Delete confirmation dialog. Pass the setter directly so the dialog
          follows the open value Radix reports instead of toggling
          possibly-stale state. */}
      <AlertDialog open={deleteDialogOpen} onOpenChange={setDeleteDialogOpen}>
        <AlertDialogContent>
          <AlertDialogHeader>
            <AlertDialogTitle>{t("bulkDelete.title")}</AlertDialogTitle>
          </AlertDialogHeader>
          <AlertDialogDescription>
            {t("bulkDelete.desc", { count: selectedExports.length })}
          </AlertDialogDescription>
          <AlertDialogFooter>
            <AlertDialogCancel>
              {t("button.cancel", { ns: "common" })}
            </AlertDialogCancel>
            <AlertDialogAction
              className={buttonVariants({ variant: "destructive" })}
              onClick={onDelete}
            >
              {t("button.delete", { ns: "common" })}
            </AlertDialogAction>
          </AlertDialogFooter>
        </AlertDialogContent>
      </AlertDialog>

      {/* Remove from case dialog */}
      {context === "case" && (
        <AlertDialog
          open={removeDialogOpen}
          onOpenChange={(open) => {
            if (!open) {
              setRemoveDialogOpen(false);
              setDeleteExportsOnRemove(false);
            }
          }}
        >
          <AlertDialogContent>
            <AlertDialogHeader>
              <AlertDialogTitle>
                {t("bulkRemoveFromCase.title")}
              </AlertDialogTitle>
              <AlertDialogDescription>
                {t("bulkRemoveFromCase.desc", {
                  count: selectedExports.length,
                })}{" "}
                {deleteExportsOnRemove
                  ? t("bulkRemoveFromCase.descDeleteExports")
                  : t("bulkRemoveFromCase.descKeepExports")}
              </AlertDialogDescription>
            </AlertDialogHeader>
            <div className="flex items-center justify-start gap-6">
              <Label
                htmlFor="bulk-delete-exports-switch"
                className="cursor-pointer text-sm"
              >
                {t("bulkRemoveFromCase.deleteExports")}
              </Label>
              <Switch
                id="bulk-delete-exports-switch"
                checked={deleteExportsOnRemove}
                onCheckedChange={setDeleteExportsOnRemove}
              />
            </div>
            <AlertDialogFooter>
              <AlertDialogCancel>
                {t("button.cancel", { ns: "common" })}
              </AlertDialogCancel>
              <AlertDialogAction
                className={buttonVariants({ variant: "destructive" })}
                onClick={handleRemoveFromCase}
              >
                {t("button.delete", { ns: "common" })}
              </AlertDialogAction>
            </AlertDialogFooter>
          </AlertDialogContent>
        </AlertDialog>
      )}

      {/* Case picker dialog */}
      <OptionAndInputDialog
        open={casePickerOpen}
        title={t("caseDialog.title")}
        description={t("caseDialog.description")}
        setOpen={setCasePickerOpen}
        options={caseOptions}
        nameLabel={t("caseDialog.nameLabel")}
        descriptionLabel={t("caseDialog.descriptionLabel")}
        initialValue={caseOptions[0]?.value}
        newValueKey="new"
        onSave={handleAssignToCase}
        onCreateNew={handleCreateNewCase}
      />

      {/* Action bar */}
      <div className="flex w-full items-center justify-end gap-2">
        <div className="mx-1 flex items-center justify-center text-sm text-muted-foreground">
          <div className="p-1">
            {t("selected", { count: selectedExports.length })}
          </div>
          <div className="p-1">{"|"}</div>
          <div
            className="cursor-pointer p-2 text-primary hover:rounded-lg hover:bg-secondary"
            onClick={onClearSelected}
          >
            {t("button.unselect", { ns: "common" })}
          </div>
        </div>
        {isAdmin && (
          <div className="flex items-center gap-1 md:gap-2">
            {/* Add to Case / Move to Case */}
            <Button
              className="flex items-center gap-2 p-2"
              aria-label={
                context === "case"
                  ? t("bulkActions.moveToCase")
                  : t("bulkActions.addToCase")
              }
              size="sm"
              onClick={() => setCasePickerOpen(true)}
            >
              <LuFolderPlus className="text-secondary-foreground" />
              {isDesktop && (
                <div className="text-primary">
                  {context === "case"
                    ? t("bulkActions.moveToCase")
                    : t("bulkActions.addToCase")}
                </div>
              )}
            </Button>

            {/* Remove from Case (case context only) */}
            {context === "case" && (
              <Button
                className="flex items-center gap-2 p-2"
                aria-label={t("bulkActions.removeFromCase")}
                size="sm"
                onClick={() => setRemoveDialogOpen(true)}
              >
                <LuFolderX className="text-secondary-foreground" />
                {isDesktop && (
                  <div className="text-primary">
                    {t("bulkActions.removeFromCase")}
                  </div>
                )}
              </Button>
            )}

            {/* Delete — label reflects the Shift bypass state */}
            <Button
              className="flex items-center gap-2 p-2"
              aria-label={t("button.delete", { ns: "common" })}
              size="sm"
              onClick={handleDelete}
            >
              <HiTrash className="text-secondary-foreground" />
              {isDesktop && (
                <div className="text-primary">
                  {bypassDialog
                    ? t("bulkActions.deleteNow")
                    : t("bulkActions.delete")}
                </div>
              )}
            </Button>
          </div>
        )}
      </div>
    </>
  );
}

View File

@ -6,6 +6,7 @@ import { isDesktop } from "react-device-detect";
import { FaCompactDisc } from "react-icons/fa"; import { FaCompactDisc } from "react-icons/fa";
import { HiTrash } from "react-icons/hi"; import { HiTrash } from "react-icons/hi";
import { ReviewSegment } from "@/types/review"; import { ReviewSegment } from "@/types/review";
import { MAX_BATCH_EXPORT_ITEMS } from "@/types/export";
import { import {
AlertDialog, AlertDialog,
AlertDialogAction, AlertDialogAction,
@ -20,6 +21,7 @@ import useKeyboardListener from "@/hooks/use-keyboard-listener";
import { Trans, useTranslation } from "react-i18next"; import { Trans, useTranslation } from "react-i18next";
import { toast } from "sonner"; import { toast } from "sonner";
import { useIsAdmin } from "@/hooks/use-is-admin"; import { useIsAdmin } from "@/hooks/use-is-admin";
import MultiExportDialog from "../overlay/MultiExportDialog";
type ReviewActionGroupProps = { type ReviewActionGroupProps = {
selectedReviews: ReviewSegment[]; selectedReviews: ReviewSegment[];
@ -164,6 +166,29 @@ export default function ReviewActionGroup({
)} )}
</Button> </Button>
)} )}
{selectedReviews.length >= 2 &&
selectedReviews.length <= MAX_BATCH_EXPORT_ITEMS && (
<MultiExportDialog
selectedReviews={selectedReviews}
onStarted={() => {
onClearSelected();
pullLatestData();
}}
>
<Button
className="flex items-center gap-2 p-2"
aria-label={t("recording.button.export")}
size="sm"
>
<FaCompactDisc className="text-secondary-foreground" />
{isDesktop && (
<div className="text-primary">
{t("recording.button.export")}
</div>
)}
</Button>
</MultiExportDialog>
)}
<Button <Button
className="flex items-center gap-2 p-2" className="flex items-center gap-2 p-2"
aria-label={ aria-label={

File diff suppressed because it is too large Load Diff

View File

@ -4,7 +4,7 @@ import { Button } from "../ui/button";
import { FaArrowDown, FaCalendarAlt, FaCog, FaFilter } from "react-icons/fa"; import { FaArrowDown, FaCalendarAlt, FaCog, FaFilter } from "react-icons/fa";
import { LuBug, LuShare2 } from "react-icons/lu"; import { LuBug, LuShare2 } from "react-icons/lu";
import { TimeRange } from "@/types/timeline"; import { TimeRange } from "@/types/timeline";
import { ExportContent, ExportPreviewDialog } from "./ExportDialog"; import { ExportContent, ExportPreviewDialog, ExportTab } from "./ExportDialog";
import { import {
DebugReplayContent, DebugReplayContent,
SaveDebugReplayOverlay, SaveDebugReplayOverlay,
@ -26,6 +26,7 @@ import SaveExportOverlay from "./SaveExportOverlay";
import { isMobile } from "react-device-detect"; import { isMobile } from "react-device-detect";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
import { StartExportResponse } from "@/types/export";
import { ShareTimestampContent } from "./ShareTimestampDialog"; import { ShareTimestampContent } from "./ShareTimestampDialog";
type DrawerMode = type DrawerMode =
@ -109,6 +110,7 @@ export default function MobileReviewSettingsDrawer({
]); ]);
const navigate = useNavigate(); const navigate = useNavigate();
const [drawerMode, setDrawerMode] = useState<DrawerMode>("none"); const [drawerMode, setDrawerMode] = useState<DrawerMode>("none");
const [exportTab, setExportTab] = useState<ExportTab>("export");
const [selectedReplayOption, setSelectedReplayOption] = useState< const [selectedReplayOption, setSelectedReplayOption] = useState<
"1" | "5" | "custom" | "timeline" "1" | "5" | "custom" | "timeline"
>("1"); >("1");
@ -129,67 +131,112 @@ export default function MobileReviewSettingsDrawer({
const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>( const [selectedCaseId, setSelectedCaseId] = useState<string | undefined>(
undefined, undefined,
); );
const onStartExport = useCallback(() => { const [singleNewCaseName, setSingleNewCaseName] = useState("");
const [singleNewCaseDescription, setSingleNewCaseDescription] = useState("");
const [isStartingExport, setIsStartingExport] = useState(false);
const onStartExport = useCallback(async () => {
if (isStartingExport) {
return false;
}
if (!range) { if (!range) {
toast.error(t("toast.error.noValidTimeSelected"), { toast.error(
position: "top-center", t("export.toast.error.noVaildTimeSelected", {
}); ns: "components/dialog",
return; }),
{
position: "top-center",
},
);
return false;
} }
if (range.before < range.after) { if (range.before < range.after) {
toast.error(t("toast.error.endTimeMustAfterStartTime"), { toast.error(
position: "top-center", t("export.toast.error.endTimeMustAfterStartTime", {
}); ns: "components/dialog",
return; }),
{
position: "top-center",
},
);
return false;
} }
axios setIsStartingExport(true);
.post(
try {
let exportCaseId: string | undefined = selectedCaseId;
if (selectedCaseId === "new" && singleNewCaseName.trim().length > 0) {
const caseResp = await axios.post("cases", {
name: singleNewCaseName.trim(),
description: singleNewCaseDescription.trim() || undefined,
});
exportCaseId = caseResp.data?.id;
} else if (selectedCaseId === "new" || selectedCaseId === "none") {
exportCaseId = undefined;
}
await axios.post<StartExportResponse>(
`export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`, `export/${camera}/start/${Math.round(range.after)}/end/${Math.round(range.before)}`,
{ {
playback: "realtime", source: "recordings",
name, name,
export_case_id: selectedCaseId || undefined, export_case_id: exportCaseId,
}, },
) );
.then((response) => {
if (response.status == 200) { toast.success(t("export.toast.queued", { ns: "components/dialog" }), {
toast.success( position: "top-center",
t("export.toast.success", { ns: "components/dialog" }), action: (
{ <a href="/export" target="_blank" rel="noopener noreferrer">
position: "top-center", <Button>
action: ( {t("export.toast.view", { ns: "components/dialog" })}
<a href="/export" target="_blank" rel="noopener noreferrer"> </Button>
<Button> </a>
{t("export.toast.view", { ns: "components/dialog" })} ),
</Button>
</a>
),
},
);
setName("");
setSelectedCaseId(undefined);
setRange(undefined);
setMode("none");
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(
t("export.toast.error.failed", {
ns: "components/dialog",
errorMessage,
}),
{
position: "top-center",
},
);
}); });
}, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); setName("");
setSelectedCaseId(undefined);
setSingleNewCaseName("");
setSingleNewCaseDescription("");
setRange(undefined);
setMode("none");
return true;
} catch (error) {
const apiError = error as {
response?: { data?: { message?: string; detail?: string } };
};
const errorMessage =
apiError.response?.data?.message ||
apiError.response?.data?.detail ||
"Unknown error";
toast.error(
t("export.toast.error.failed", {
ns: "components/dialog",
error: errorMessage,
}),
{
position: "top-center",
},
);
return false;
} finally {
setIsStartingExport(false);
}
}, [
camera,
isStartingExport,
name,
range,
selectedCaseId,
singleNewCaseDescription,
singleNewCaseName,
setRange,
setMode,
t,
]);
const onStartDebugReplay = useCallback(async () => { const onStartDebugReplay = useCallback(async () => {
if ( if (
@ -283,6 +330,7 @@ export default function MobileReviewSettingsDrawer({
className="flex w-full items-center justify-center gap-2" className="flex w-full items-center justify-center gap-2"
aria-label={t("export")} aria-label={t("export")}
onClick={() => { onClick={() => {
setExportTab("export");
setDrawerMode("export"); setDrawerMode("export");
setMode("select"); setMode("select");
}} }}
@ -368,14 +416,21 @@ export default function MobileReviewSettingsDrawer({
range={range} range={range}
name={name} name={name}
selectedCaseId={selectedCaseId} selectedCaseId={selectedCaseId}
singleNewCaseName={singleNewCaseName}
singleNewCaseDescription={singleNewCaseDescription}
activeTab={exportTab}
isStartingExport={isStartingExport}
onStartExport={onStartExport} onStartExport={onStartExport}
setActiveTab={setExportTab}
setName={setName} setName={setName}
setSelectedCaseId={setSelectedCaseId} setSelectedCaseId={setSelectedCaseId}
setSingleNewCaseName={setSingleNewCaseName}
setSingleNewCaseDescription={setSingleNewCaseDescription}
setRange={setRange} setRange={setRange}
setMode={(mode) => { setMode={(mode) => {
setMode(mode); setMode(mode);
if (mode == "timeline") { if (mode == "timeline" || mode == "timeline_multi") {
setDrawerMode("none"); setDrawerMode("none");
} }
}} }}
@ -383,6 +438,9 @@ export default function MobileReviewSettingsDrawer({
setMode("none"); setMode("none");
setRange(undefined); setRange(undefined);
setSelectedCaseId(undefined); setSelectedCaseId(undefined);
setSingleNewCaseName("");
setSingleNewCaseDescription("");
setExportTab("export");
setDrawerMode("select"); setDrawerMode("select");
}} }}
/> />
@ -542,9 +600,29 @@ export default function MobileReviewSettingsDrawer({
<> <>
<SaveExportOverlay <SaveExportOverlay
className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2" className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2"
show={mode == "timeline"} show={mode == "timeline" || mode == "timeline_multi"}
onSave={() => onStartExport()} hidePreview={mode == "timeline_multi"}
onCancel={() => setMode("none")} isSaving={isStartingExport}
saveLabel={
mode == "timeline_multi"
? t("export.fromTimeline.useThisRange", { ns: "components/dialog" })
: undefined
}
onSave={() => {
if (mode == "timeline_multi") {
setExportTab("multi");
setDrawerMode("export");
setMode("select");
return;
}
void onStartExport();
}}
onCancel={() => {
setExportTab("export");
setRange(undefined);
setMode("none");
}}
onPreview={() => setShowExportPreview(true)} onPreview={() => setShowExportPreview(true)}
/> />
<SaveDebugReplayOverlay <SaveDebugReplayOverlay

View File

@ -0,0 +1,405 @@
import { useCallback, useMemo, useState } from "react";
import { isDesktop } from "react-device-detect";
import axios from "axios";
import { toast } from "sonner";
import { useTranslation } from "react-i18next";
import { useNavigate } from "react-router-dom";
import useSWR from "swr";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "../ui/dialog";
import {
Drawer,
DrawerContent,
DrawerDescription,
DrawerHeader,
DrawerTitle,
DrawerTrigger,
} from "../ui/drawer";
import { Button } from "../ui/button";
import { Input } from "../ui/input";
import { Label } from "../ui/label";
import {
Select,
SelectContent,
SelectItem,
SelectSeparator,
SelectTrigger,
SelectValue,
} from "../ui/select";
import { Textarea } from "../ui/textarea";
import {
BatchExportBody,
BatchExportResponse,
BatchExportResult,
ExportCase,
} from "@/types/export";
import { FrigateConfig } from "@/types/frigateConfig";
import { REVIEW_PADDING, ReviewSegment } from "@/types/review";
import { resolveCameraName } from "@/hooks/use-camera-friendly-name";
import { useDateLocale } from "@/hooks/use-date-locale";
import { useIsAdmin } from "@/hooks/use-is-admin";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
type MultiExportDialogProps = {
selectedReviews: ReviewSegment[];
onStarted: () => void;
children: React.ReactNode;
};
const NONE_CASE_OPTION = "none";
const NEW_CASE_OPTION = "new";
export default function MultiExportDialog({
selectedReviews,
onStarted,
children,
}: MultiExportDialogProps) {
const { t } = useTranslation(["components/dialog", "common"]);
const locale = useDateLocale();
const navigate = useNavigate();
const isAdmin = useIsAdmin();
const { data: config } = useSWR<FrigateConfig>("config");
// Only admins can attach exports to an existing case (enforced server-side
// by POST /exports/batch). Skip fetching the case list entirely for
// non-admins — they can only ever use the "Create new case" branch.
const { data: cases } = useSWR<ExportCase[]>(isAdmin ? "cases" : null);
const [open, setOpen] = useState(false);
const [caseSelection, setCaseSelection] = useState<string>(NONE_CASE_OPTION);
const [newCaseName, setNewCaseName] = useState("");
const [newCaseDescription, setNewCaseDescription] = useState("");
const [isExporting, setIsExporting] = useState(false);
const count = selectedReviews.length;
// Resolve a failed batch result back to a human-readable label via the
// client-provided review id when available. Falls back to item_index and
// finally camera name for defensive compatibility.
const formatFailureLabel = useCallback(
(result: BatchExportResult): string => {
const cameraName = resolveCameraName(config, result.camera);
if (result.client_item_id) {
const review = selectedReviews.find(
(item) => item.id === result.client_item_id,
);
if (review) {
const time = formatUnixTimestampToDateTime(review.start_time, {
date_style: "short",
time_style: "short",
locale,
});
return `${cameraName}${time}`;
}
}
if (
typeof result.item_index === "number" &&
result.item_index >= 0 &&
result.item_index < selectedReviews.length
) {
const review = selectedReviews[result.item_index];
const time = formatUnixTimestampToDateTime(review.start_time, {
date_style: "short",
time_style: "short",
locale,
});
return `${cameraName}${time}`;
}
return cameraName;
},
[config, locale, selectedReviews],
);
const defaultCaseName = useMemo(() => {
const formattedDate = formatUnixTimestampToDateTime(Date.now() / 1000, {
date_style: "medium",
time_style: "short",
locale,
});
return t("export.multi.caseNamePlaceholder", {
ns: "components/dialog",
date: formattedDate,
});
}, [t, locale]);
const resetState = useCallback(() => {
setCaseSelection(NONE_CASE_OPTION);
setNewCaseName("");
setNewCaseDescription("");
setIsExporting(false);
}, []);
const handleOpenChange = useCallback(
(next: boolean) => {
if (!next) {
resetState();
} else {
// Freshly reset each time so the default name reflects "now"
setCaseSelection(NONE_CASE_OPTION);
setNewCaseName(defaultCaseName);
setNewCaseDescription("");
setIsExporting(false);
}
setOpen(next);
},
[defaultCaseName, resetState],
);
const existingCases = useMemo(() => {
return (cases ?? []).slice().sort((a, b) => a.name.localeCompare(b.name));
}, [cases]);
const isNewCase = caseSelection === NEW_CASE_OPTION;
const canSubmit = useMemo(() => {
if (isExporting) return false;
if (count === 0) return false;
if (!isAdmin) return true;
if (isNewCase) {
return newCaseName.trim().length > 0;
}
return caseSelection.length > 0;
}, [caseSelection, count, isAdmin, isExporting, isNewCase, newCaseName]);
const handleSubmit = useCallback(async () => {
if (!canSubmit) return;
const items = selectedReviews.map((review) => ({
camera: review.camera,
start_time: review.start_time - REVIEW_PADDING,
end_time: (review.end_time ?? Date.now() / 1000) + REVIEW_PADDING,
image_path: review.thumb_path || undefined,
client_item_id: review.id,
}));
const payload: BatchExportBody = { items };
if (isAdmin && caseSelection !== NONE_CASE_OPTION) {
if (isNewCase) {
payload.new_case_name = newCaseName.trim();
payload.new_case_description = newCaseDescription.trim() || undefined;
} else {
payload.export_case_id = caseSelection;
}
}
setIsExporting(true);
try {
const response = await axios.post<BatchExportResponse>(
"exports/batch",
payload,
);
const results = response.data.results ?? [];
const successful = results.filter((r) => r.success);
const failed = results.filter((r) => !r.success);
if (successful.length > 0 && failed.length === 0) {
toast.success(
t(
isAdmin
? "export.multi.toast.started"
: "export.multi.toast.startedNoCase",
{
ns: "components/dialog",
count: successful.length,
},
),
{ position: "top-center" },
);
} else if (successful.length > 0 && failed.length > 0) {
// Resolve each failure to its review via item_index so same-camera
// items are disambiguated by time. Falls back to camera-only if the
// server didn't populate item_index.
const failedLabels = failed.map(formatFailureLabel).join(", ");
toast.success(
t("export.multi.toast.partial", {
ns: "components/dialog",
successful: successful.length,
total: results.length,
failedItems: failedLabels,
}),
{ position: "top-center" },
);
} else {
const failedLabels = failed.map(formatFailureLabel).join(", ");
toast.error(
t("export.multi.toast.failed", {
ns: "components/dialog",
total: results.length,
failedItems: failedLabels,
}),
{ position: "top-center" },
);
}
if (successful.length > 0) {
onStarted();
setOpen(false);
resetState();
if (response.data.export_case_id) {
navigate(`/export?caseId=${response.data.export_case_id}`);
}
}
} catch (error) {
const apiError = error as {
response?: { data?: { message?: string; detail?: string } };
};
const errorMessage =
apiError.response?.data?.message ||
apiError.response?.data?.detail ||
"Unknown error";
toast.error(
t("export.toast.error.failed", {
ns: "components/dialog",
error: errorMessage,
}),
{ position: "top-center" },
);
} finally {
setIsExporting(false);
}
}, [
canSubmit,
caseSelection,
formatFailureLabel,
isAdmin,
isNewCase,
navigate,
newCaseDescription,
newCaseName,
onStarted,
resetState,
selectedReviews,
t,
]);
// New-case inputs: rendered below the Select when caseSelection === "new",
// or rendered standalone for non-admins (who never see the Select since
// they cannot attach to an existing case).
// Name is capped at 100 chars; description is a free-form two-row textarea.
// Autofocus only on desktop (mobile autofocus would pop the keyboard).
const newCaseInputs = (
<div className="space-y-2 pt-1">
<Input
className="text-md"
placeholder={t("export.case.newCaseNamePlaceholder")}
value={newCaseName}
onChange={(event) => setNewCaseName(event.target.value)}
maxLength={100}
autoFocus={isDesktop}
/>
<Textarea
className="text-md"
placeholder={t("export.case.newCaseDescriptionPlaceholder")}
value={newCaseDescription}
onChange={(event) => setNewCaseDescription(event.target.value)}
rows={2}
/>
</div>
);
// Dialog body. Admins get a case selector — None / existing cases / a
// "new case" option separated at the bottom — with the new-case inputs
// rendered inline when "new" is selected. Non-admins render an empty body
// here (their new-case inputs are handled elsewhere).
const body = (
<div className="flex flex-col gap-4">
{isAdmin && (
<div className="space-y-2">
<Label className="text-sm text-secondary-foreground">
{t("export.case.label")}
</Label>
<Select
value={caseSelection}
onValueChange={(value) => setCaseSelection(value)}
>
<SelectTrigger>
<SelectValue placeholder={t("export.case.placeholder")} />
</SelectTrigger>
<SelectContent>
<SelectItem value={NONE_CASE_OPTION}>
{t("label.none", { ns: "common" })}
</SelectItem>
{existingCases.map((caseItem) => (
<SelectItem key={caseItem.id} value={caseItem.id}>
{caseItem.name}
</SelectItem>
))}
<SelectSeparator />
<SelectItem value={NEW_CASE_OPTION}>
{t("export.case.newCaseOption")}
</SelectItem>
</SelectContent>
</Select>
{isNewCase && newCaseInputs}
</div>
)}
</div>
);
// Footer actions shared by the desktop Dialog and mobile Drawer:
// cancel (disabled mid-export) and the submit button, whose visible label
// swaps to an "exporting" state while the request is in flight.
// NOTE(review): aria-label stays on the exportButton string even while
// exporting — consider mirroring the visible label swap; confirm intent.
const footer = (
<>
<Button
variant="outline"
onClick={() => handleOpenChange(false)}
disabled={isExporting}
>
{t("button.cancel", { ns: "common" })}
</Button>
<Button
variant="select"
onClick={handleSubmit}
disabled={!canSubmit}
aria-label={t("export.multi.exportButton", { count: count })}
>
{isExporting
? t("export.multi.exportingButton")
: t("export.multi.exportButton", { count: count })}
</Button>
</>
);
// Desktop presentation: modal Dialog wrapping the shared body/footer.
// Description text differs for admins (case-aware) vs. non-admins.
if (isDesktop) {
return (
<Dialog open={open} onOpenChange={handleOpenChange}>
<DialogTrigger asChild>{children}</DialogTrigger>
<DialogContent className="sm:max-w-md">
<DialogHeader>
<DialogTitle>
{t("export.multi.title", { count: count })}
</DialogTitle>
<DialogDescription>
{isAdmin
? t("export.multi.description")
: t("export.multi.descriptionNoCase")}
</DialogDescription>
</DialogHeader>
{body}
<DialogFooter className="gap-2">{footer}</DialogFooter>
</DialogContent>
</Dialog>
);
}
// Mobile presentation: bottom Drawer with the same body/footer; footer
// buttons are stacked and reversed so the primary action sits on top.
return (
<Drawer open={open} onOpenChange={handleOpenChange}>
<DrawerTrigger asChild>{children}</DrawerTrigger>
<DrawerContent className="px-4 pb-6">
<DrawerHeader className="px-0">
<DrawerTitle>{t("export.multi.title", { count: count })}</DrawerTitle>
<DrawerDescription>
{isAdmin
? t("export.multi.description")
: t("export.multi.descriptionNoCase")}
</DrawerDescription>
</DrawerHeader>
{body}
<div className="mt-4 flex flex-col-reverse gap-2">{footer}</div>
</DrawerContent>
</Drawer>
);
}

View File

@ -7,6 +7,9 @@ import { useTranslation } from "react-i18next";
type SaveExportOverlayProps = { type SaveExportOverlayProps = {
className: string; className: string;
show: boolean; show: boolean;
hidePreview?: boolean;
saveLabel?: string;
isSaving?: boolean;
onPreview: () => void; onPreview: () => void;
onSave: () => void; onSave: () => void;
onCancel: () => void; onCancel: () => void;
@ -14,6 +17,9 @@ type SaveExportOverlayProps = {
export default function SaveExportOverlay({ export default function SaveExportOverlay({
className, className,
show, show,
hidePreview = false,
saveLabel,
isSaving = false,
onPreview, onPreview,
onSave, onSave,
onCancel, onCancel,
@ -32,29 +38,36 @@ export default function SaveExportOverlay({
className="flex items-center gap-1 text-primary" className="flex items-center gap-1 text-primary"
aria-label={t("button.cancel", { ns: "common" })} aria-label={t("button.cancel", { ns: "common" })}
size="sm" size="sm"
disabled={isSaving}
onClick={onCancel} onClick={onCancel}
> >
<LuX /> <LuX />
{t("button.cancel", { ns: "common" })} {t("button.cancel", { ns: "common" })}
</Button> </Button>
{!hidePreview && (
<Button
className="flex items-center gap-1"
aria-label={t("export.fromTimeline.previewExport")}
size="sm"
disabled={isSaving}
onClick={onPreview}
>
<LuVideo />
{t("export.fromTimeline.previewExport")}
</Button>
)}
<Button <Button
className="flex items-center gap-1" className="flex items-center gap-1"
aria-label={t("export.fromTimeline.previewExport")} aria-label={saveLabel || t("export.fromTimeline.saveExport")}
size="sm"
onClick={onPreview}
>
<LuVideo />
{t("export.fromTimeline.previewExport")}
</Button>
<Button
className="flex items-center gap-1"
aria-label={t("export.fromTimeline.saveExport")}
variant="select" variant="select"
size="sm" size="sm"
disabled={isSaving}
onClick={onSave} onClick={onSave}
> >
<FaCompactDisc /> <FaCompactDisc />
{t("export.fromTimeline.saveExport")} {isSaving
? t("export.fromTimeline.queueingExport")
: saveLabel || t("export.fromTimeline.saveExport")}
</Button> </Button>
</div> </div>
</div> </div>

View File

@ -6,7 +6,7 @@ import {
ThreatLevel, ThreatLevel,
THREAT_LEVEL_LABELS, THREAT_LEVEL_LABELS,
} from "@/types/review"; } from "@/types/review";
import React, { useEffect, useMemo, useState } from "react"; import React, { useEffect, useMemo, useRef, useState } from "react";
import { isDesktop } from "react-device-detect"; import { isDesktop } from "react-device-detect";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import { MdAutoAwesome } from "react-icons/md"; import { MdAutoAwesome } from "react-icons/md";
@ -95,11 +95,12 @@ export function GenAISummaryDialog({
const Trigger = isDesktop ? DialogTrigger : DrawerTrigger; const Trigger = isDesktop ? DialogTrigger : DrawerTrigger;
const Content = isDesktop ? DialogContent : DrawerContent; const Content = isDesktop ? DialogContent : DrawerContent;
const onOpenRef = useRef(onOpen);
onOpenRef.current = onOpen;
useEffect(() => { useEffect(() => {
if (onOpen) { onOpenRef.current?.(open);
onOpen(open); }, [open]);
}
}, [open, onOpen]);
if (!aiAnalysis) { if (!aiAnalysis) {
return null; return null;

View File

@ -8,6 +8,7 @@ import {
DialogTitle, DialogTitle,
} from "@/components/ui/dialog"; } from "@/components/ui/dialog";
import { Input } from "@/components/ui/input"; import { Input } from "@/components/ui/input";
import { Textarea } from "@/components/ui/textarea";
import { import {
Select, Select,
SelectContent, SelectContent,
@ -15,9 +16,10 @@ import {
SelectTrigger, SelectTrigger,
SelectValue, SelectValue,
} from "@/components/ui/select"; } from "@/components/ui/select";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { isMobile } from "react-device-detect"; import { isMobile } from "react-device-detect";
import { useEffect, useMemo, useState } from "react"; import { useCallback, useEffect, useMemo, useState } from "react";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
type Option = { type Option = {
@ -35,8 +37,8 @@ type OptionAndInputDialogProps = {
nameLabel: string; nameLabel: string;
descriptionLabel: string; descriptionLabel: string;
setOpen: (open: boolean) => void; setOpen: (open: boolean) => void;
onSave: (value: string) => void; onSave: (value: string) => Promise<void>;
onCreateNew: (name: string, description: string) => void; onCreateNew: (name: string, description: string) => Promise<void>;
}; };
export default function OptionAndInputDialog({ export default function OptionAndInputDialog({
@ -69,10 +71,12 @@ export default function OptionAndInputDialog({
} }
}, [open, initialValue, firstOption]); }, [open, initialValue, firstOption]);
const [isLoading, setIsLoading] = useState(false);
const isNew = selectedValue === newValueKey; const isNew = selectedValue === newValueKey;
const disableSave = !selectedValue || (isNew && name.trim().length === 0); const disableSave =
!selectedValue || (isNew && name.trim().length === 0) || isLoading;
const handleSave = () => { const handleSave = useCallback(async () => {
if (!selectedValue) { if (!selectedValue) {
return; return;
} }
@ -80,13 +84,26 @@ export default function OptionAndInputDialog({
const trimmedName = name.trim(); const trimmedName = name.trim();
const trimmedDescription = descriptionValue.trim(); const trimmedDescription = descriptionValue.trim();
if (isNew) { setIsLoading(true);
onCreateNew(trimmedName, trimmedDescription); try {
} else { if (isNew) {
onSave(selectedValue); await onCreateNew(trimmedName, trimmedDescription);
} else {
await onSave(selectedValue);
}
setOpen(false);
} finally {
setIsLoading(false);
} }
setOpen(false); }, [
}; selectedValue,
name,
descriptionValue,
isNew,
onCreateNew,
onSave,
setOpen,
]);
return ( return (
<Dialog open={open} defaultOpen={false} onOpenChange={setOpen}> <Dialog open={open} defaultOpen={false} onOpenChange={setOpen}>
@ -127,15 +144,21 @@ export default function OptionAndInputDialog({
<label className="text-sm font-medium text-secondary-foreground"> <label className="text-sm font-medium text-secondary-foreground">
{nameLabel} {nameLabel}
</label> </label>
<Input value={name} onChange={(e) => setName(e.target.value)} /> <Input
className="text-md"
value={name}
onChange={(e) => setName(e.target.value)}
/>
</div> </div>
<div className="space-y-1"> <div className="space-y-1">
<label className="text-sm font-medium text-secondary-foreground"> <label className="text-sm font-medium text-secondary-foreground">
{descriptionLabel} {descriptionLabel}
</label> </label>
<Input <Textarea
className="text-md"
value={descriptionValue} value={descriptionValue}
onChange={(e) => setDescriptionValue(e.target.value)} onChange={(e) => setDescriptionValue(e.target.value)}
rows={2}
/> />
</div> </div>
</div> </div>
@ -145,6 +168,7 @@ export default function OptionAndInputDialog({
<Button <Button
type="button" type="button"
variant="outline" variant="outline"
disabled={isLoading}
onClick={() => { onClick={() => {
setOpen(false); setOpen(false);
}} }}
@ -155,9 +179,13 @@ export default function OptionAndInputDialog({
type="button" type="button"
variant="select" variant="select"
disabled={disableSave} disabled={disableSave}
onClick={handleSave} onClick={() => void handleSave()}
> >
{t("button.save")} {isLoading ? (
<ActivityIndicator className="size-4" />
) : (
t("button.save")
)}
</Button> </Button>
</DialogFooter> </DialogFooter>
</DialogContent> </DialogContent>

View File

@ -19,7 +19,7 @@ const Toaster = ({ ...props }: ToasterProps) => {
cancelButton: cancelButton:
"group-[.toast]:bg-muted group-[.toast]:text-muted-foreground", "group-[.toast]:bg-muted group-[.toast]:text-muted-foreground",
closeButton: closeButton:
"group-[.toast]:bg-secondary border-primary border-[1px]", "group-[.toast]:bg-secondary group-[.toast]:text-primary group-[.toast]:border-primary group-[.toast]:border-[1px]",
success: success:
"group toast group-[.toaster]:bg-success group-[.toaster]:text-foreground group-[.toaster]:border-border group-[.toaster]:shadow-lg", "group toast group-[.toaster]:bg-success group-[.toaster]:text-foreground group-[.toaster]:border-border group-[.toaster]:shadow-lg",
error: error:

View File

@ -11,15 +11,20 @@ import useDeepMemo from "./use-deep-memo";
import { capitalizeAll, capitalizeFirstLetter } from "@/utils/stringUtil"; import { capitalizeAll, capitalizeFirstLetter } from "@/utils/stringUtil";
import { isReplayCamera } from "@/utils/cameraUtil"; import { isReplayCamera } from "@/utils/cameraUtil";
import { useFrigateStats } from "@/api/ws"; import { useFrigateStats } from "@/api/ws";
import { useIsAdmin } from "./use-is-admin";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
export default function useStats(stats: FrigateStats | undefined) { export default function useStats(stats: FrigateStats | undefined) {
const { t } = useTranslation(["views/system"]); const { t } = useTranslation(["views/system"]);
const { data: config } = useSWR<FrigateConfig>("config"); const { data: config } = useSWR<FrigateConfig>("config");
const { data: debugReplayStatus } = useSWR("debug_replay/status", { const isAdmin = useIsAdmin();
revalidateOnFocus: false, const { data: debugReplayStatus } = useSWR(
}); isAdmin ? "debug_replay/status" : null,
{
revalidateOnFocus: false,
},
);
const memoizedStats = useDeepMemo(stats); const memoizedStats = useDeepMemo(stats);

View File

@ -1,17 +1,22 @@
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input"; import { Input } from "@/components/ui/input";
import { FaArrowUpLong } from "react-icons/fa6"; import { FaArrowUpLong, FaStop } from "react-icons/fa6";
import { LuCircleAlert } from "react-icons/lu"; import { LuCircleAlert } from "react-icons/lu";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import { useState, useCallback, useRef, useEffect } from "react"; import { useState, useCallback, useRef, useEffect, useMemo } from "react";
import axios from "axios"; import axios from "axios";
import { ChatEventThumbnailsRow } from "@/components/chat/ChatEventThumbnailsRow"; import { ChatEventThumbnailsRow } from "@/components/chat/ChatEventThumbnailsRow";
import { MessageBubble } from "@/components/chat/ChatMessage"; import { MessageBubble } from "@/components/chat/ChatMessage";
import { ToolCallsGroup } from "@/components/chat/ToolCallsGroup"; import { ToolCallsGroup } from "@/components/chat/ToolCallsGroup";
import { ChatStartingState } from "@/components/chat/ChatStartingState"; import { ChatStartingState } from "@/components/chat/ChatStartingState";
import { ChatAttachmentChip } from "@/components/chat/ChatAttachmentChip";
import { ChatQuickReplies } from "@/components/chat/ChatQuickReplies";
import { ChatPaperclipButton } from "@/components/chat/ChatPaperclipButton";
import type { ChatMessage } from "@/types/chat"; import type { ChatMessage } from "@/types/chat";
import { import {
getEventIdsFromSearchObjectsToolCalls, getEventIdsFromSearchObjectsToolCalls,
getFindSimilarObjectsFromToolCalls,
prependAttachment,
streamChatCompletion, streamChatCompletion,
} from "@/utils/chatUtil"; } from "@/utils/chatUtil";
@ -21,7 +26,9 @@ export default function ChatPage() {
const [messages, setMessages] = useState<ChatMessage[]>([]); const [messages, setMessages] = useState<ChatMessage[]>([]);
const [isLoading, setIsLoading] = useState(false); const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState<string | null>(null); const [error, setError] = useState<string | null>(null);
const [attachedEventId, setAttachedEventId] = useState<string | null>(null);
const scrollRef = useRef<HTMLDivElement>(null); const scrollRef = useRef<HTMLDivElement>(null);
const abortRef = useRef<AbortController | null>(null);
useEffect(() => { useEffect(() => {
document.title = t("documentTitle"); document.title = t("documentTitle");
@ -64,22 +71,59 @@ export default function ChatPage() {
...(axios.defaults.headers.common as Record<string, string>), ...(axios.defaults.headers.common as Record<string, string>),
}; };
await streamChatCompletion(url, headers, apiMessages, { const controller = new AbortController();
updateMessages: (updater) => setMessages(updater), abortRef.current = controller;
onError: (message) => setError(message),
onDone: () => setIsLoading(false), await streamChatCompletion(
defaultErrorMessage: t("error"), url,
}); headers,
apiMessages,
{
updateMessages: (updater) => setMessages(updater),
onError: (message) => setError(message),
onDone: () => {
abortRef.current = null;
setIsLoading(false);
},
defaultErrorMessage: t("error"),
},
controller.signal,
);
}, },
[isLoading, t], [isLoading, t],
); );
const sendMessage = useCallback(() => { const recentEventIds = useMemo(() => {
const text = input.trim(); for (let i = messages.length - 1; i >= 0; i--) {
if (!text || isLoading) return; const msg = messages[i];
setInput(""); if (msg.role !== "assistant" || !msg.toolCalls) continue;
submitConversation([...messages, { role: "user", content: text }]); const similar = getFindSimilarObjectsFromToolCalls(msg.toolCalls);
}, [input, isLoading, messages, submitConversation]); if (similar) return similar.results.map((e) => e.id);
const events = getEventIdsFromSearchObjectsToolCalls(msg.toolCalls);
if (events.length > 0) return events.map((e) => e.id);
}
return [];
}, [messages]);
const sendMessage = useCallback(
(textOverride?: string) => {
const text = (textOverride ?? input).trim();
if (!text || isLoading) return;
const wireText = attachedEventId
? prependAttachment(text, attachedEventId)
: text;
setInput("");
setAttachedEventId(null);
submitConversation([...messages, { role: "user", content: wireText }]);
},
[attachedEventId, input, isLoading, messages, submitConversation],
);
const stopGeneration = useCallback(() => {
abortRef.current?.abort();
abortRef.current = null;
setIsLoading(false);
}, []);
const handleEditSubmit = useCallback( const handleEditSubmit = useCallback(
(messageIndex: number, newContent: string) => { (messageIndex: number, newContent: string) => {
@ -92,6 +136,10 @@ export default function ChatPage() {
[messages, submitConversation], [messages, submitConversation],
); );
const handleClearAttachment = useCallback(() => {
setAttachedEventId(null);
}, []);
return ( return (
<div className="flex size-full justify-center p-2 md:p-4"> <div className="flex size-full justify-center p-2 md:p-4">
<div className="flex size-full flex-col xl:w-[50%] 3xl:w-[35%]"> <div className="flex size-full flex-col xl:w-[50%] 3xl:w-[35%]">
@ -161,10 +209,27 @@ export default function ChatPage() {
{msg.role === "assistant" && {msg.role === "assistant" &&
isComplete && isComplete &&
(() => { (() => {
const similar = getFindSimilarObjectsFromToolCalls(
msg.toolCalls,
);
if (similar) {
return (
<ChatEventThumbnailsRow
events={similar.results}
anchor={similar.anchor}
onAttach={setAttachedEventId}
/>
);
}
const events = getEventIdsFromSearchObjectsToolCalls( const events = getEventIdsFromSearchObjectsToolCalls(
msg.toolCalls, msg.toolCalls,
); );
return <ChatEventThumbnailsRow events={events} />; return (
<ChatEventThumbnailsRow
events={events}
onAttach={setAttachedEventId}
/>
);
})()} })()}
</div> </div>
); );
@ -188,6 +253,11 @@ export default function ChatPage() {
sendMessage={sendMessage} sendMessage={sendMessage}
isLoading={isLoading} isLoading={isLoading}
placeholder={t("placeholder")} placeholder={t("placeholder")}
attachedEventId={attachedEventId}
onClearAttachment={handleClearAttachment}
onAttach={setAttachedEventId}
onStop={stopGeneration}
recentEventIds={recentEventIds}
/> />
)} )}
</div> </div>
@ -198,9 +268,14 @@ export default function ChatPage() {
type ChatEntryProps = { type ChatEntryProps = {
input: string; input: string;
setInput: (value: string) => void; setInput: (value: string) => void;
sendMessage: () => void; sendMessage: (textOverride?: string) => void;
isLoading: boolean; isLoading: boolean;
placeholder: string; placeholder: string;
attachedEventId: string | null;
onClearAttachment: () => void;
onAttach: (eventId: string) => void;
onStop: () => void;
recentEventIds: string[];
}; };
function ChatEntry({ function ChatEntry({
@ -209,6 +284,11 @@ function ChatEntry({
sendMessage, sendMessage,
isLoading, isLoading,
placeholder, placeholder,
attachedEventId,
onClearAttachment,
onAttach,
onStop,
recentEventIds,
}: ChatEntryProps) { }: ChatEntryProps) {
const handleKeyDown = (e: React.KeyboardEvent<HTMLInputElement>) => { const handleKeyDown = (e: React.KeyboardEvent<HTMLInputElement>) => {
if (e.key === "Enter" && !e.shiftKey) { if (e.key === "Enter" && !e.shiftKey) {
@ -218,8 +298,28 @@ function ChatEntry({
}; };
return ( return (
<div className="mt-2 flex w-full flex-col items-center justify-center rounded-xl bg-secondary p-3"> <div className="mt-2 flex w-full flex-col items-stretch justify-center gap-2 rounded-xl bg-secondary p-3">
{attachedEventId && (
<div className="flex items-center">
<ChatAttachmentChip
eventId={attachedEventId}
mode="composer"
onRemove={onClearAttachment}
/>
</div>
)}
{attachedEventId && (
<ChatQuickReplies
onSend={(text) => sendMessage(text)}
disabled={isLoading}
/>
)}
<div className="flex w-full flex-row items-center gap-2"> <div className="flex w-full flex-row items-center gap-2">
<ChatPaperclipButton
recentEventIds={recentEventIds}
onAttach={onAttach}
disabled={isLoading || attachedEventId != null}
/>
<Input <Input
className="w-full flex-1 border-transparent bg-transparent shadow-none focus-visible:ring-0 dark:bg-transparent" className="w-full flex-1 border-transparent bg-transparent shadow-none focus-visible:ring-0 dark:bg-transparent"
placeholder={placeholder} placeholder={placeholder}
@ -228,14 +328,24 @@ function ChatEntry({
onKeyDown={handleKeyDown} onKeyDown={handleKeyDown}
aria-busy={isLoading} aria-busy={isLoading}
/> />
<Button {isLoading ? (
variant="select" <Button
className="size-10 shrink-0 rounded-full" variant="destructive"
disabled={!input.trim() || isLoading} className="size-10 shrink-0 rounded-full"
onClick={sendMessage} onClick={onStop}
> >
<FaArrowUpLong size="16" /> <FaStop className="size-3" />
</Button> </Button>
) : (
<Button
variant="select"
className="size-10 shrink-0 rounded-full"
disabled={!input.trim()}
onClick={() => sendMessage()}
>
<FaArrowUpLong className="size-4" />
</Button>
)}
</div> </div>
</div> </div>
); );

View File

@ -28,6 +28,7 @@ import {
import EventView from "@/views/events/EventView"; import EventView from "@/views/events/EventView";
import MotionSearchView from "@/views/motion-search/MotionSearchView"; import MotionSearchView from "@/views/motion-search/MotionSearchView";
import { RecordingView } from "@/views/recording/RecordingView"; import { RecordingView } from "@/views/recording/RecordingView";
import { useFrigateReviews } from "@/api/ws";
import axios from "axios"; import axios from "axios";
import { useCallback, useEffect, useMemo, useState } from "react"; import { useCallback, useEffect, useMemo, useState } from "react";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
@ -381,6 +382,32 @@ export default function Events() {
}; };
}, [reviews]); }, [reviews]);
// update review items in place when a review segment ends
const reviewUpdate = useFrigateReviews();
const [endedReviews, setEndedReviews] = useState(
new Map<string, ReviewSegment>(),
);
useEffect(() => {
if (reviewUpdate?.type === "end" && reviews) {
updateSegments(
(data) => {
if (!data) return data;
return data.map((seg) =>
seg.id === reviewUpdate.after.id ? reviewUpdate.after : seg,
);
},
{ revalidate: false, populateCache: true },
);
setEndedReviews((prev) =>
new Map(prev).set(reviewUpdate.after.id, reviewUpdate.after),
);
}
// reviews is intentionally excluded - only used to guard against
// updating the SWR cache before data has loaded
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [reviewUpdate, updateSegments]);
const currentItems = useMemo(() => { const currentItems = useMemo(() => {
if (!reviewItems || !severity) { if (!reviewItems || !severity) {
return null; return null;
@ -407,6 +434,13 @@ export default function Events() {
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [severity, reviewFilter, showReviewed, reviewItems?.all.length]); }, [severity, reviewFilter, showReviewed, reviewItems?.all.length]);
// overlay end_time updates onto currentItems without re-running
// the has_been_reviewed filter, so hover-reviewed items stay visible
const displayItems = useMemo(() => {
if (!currentItems || endedReviews.size === 0) return currentItems;
return currentItems.map((seg) => endedReviews.get(seg.id) ?? seg);
}, [currentItems, endedReviews]);
// review summary // review summary
const { data: reviewSummary, mutate: updateSummary } = useSWR<ReviewSummary>( const { data: reviewSummary, mutate: updateSummary } = useSWR<ReviewSummary>(
@ -658,7 +692,7 @@ export default function Events() {
) : ( ) : (
<EventView <EventView
reviewItems={reviewItems} reviewItems={reviewItems}
currentReviewItems={currentItems} currentReviewItems={displayItems}
reviewSummary={reviewSummary} reviewSummary={reviewSummary}
recordingsSummary={recordingsSummary} recordingsSummary={recordingsSummary}
relevantPreviews={allPreviews} relevantPreviews={allPreviews}

File diff suppressed because it is too large Load Diff

View File

@ -6,7 +6,8 @@ export type Export = {
video_path: string; video_path: string;
thumb_path: string; thumb_path: string;
in_progress: boolean; in_progress: boolean;
export_case?: string; export_case?: string | null;
export_case_id?: string | null;
}; };
export type ExportCase = { export type ExportCase = {
@ -17,6 +18,81 @@ export type ExportCase = {
updated_at: number; updated_at: number;
}; };
export type BatchExportBody = {
items: BatchExportItem[];
export_case_id?: string;
new_case_name?: string;
new_case_description?: string;
};
export const MAX_BATCH_EXPORT_ITEMS = 50;
export type BatchExportItem = {
camera: string;
start_time: number;
end_time: number;
image_path?: string;
friendly_name?: string;
client_item_id?: string;
};
export type BatchExportResult = {
camera: string;
export_id?: string | null;
success: boolean;
status?: string | null;
error?: string | null;
item_index?: number | null;
client_item_id?: string | null;
};
export type BatchExportResponse = {
export_case_id?: string | null;
export_ids: string[];
results: BatchExportResult[];
};
export type StartExportResponse = {
success: boolean;
message: string;
export_id?: string | null;
status?: string | null;
};
export type ExportJob = {
id: string;
job_type: string;
status: string;
camera: string;
name?: string | null;
export_case_id?: string | null;
request_start_time: number;
request_end_time: number;
start_time?: number | null;
end_time?: number | null;
error_message?: string | null;
results?: {
export_id?: string;
export_case_id?: string | null;
video_path?: string;
thumb_path?: string;
} | null;
};
export type CameraActivitySegment = {
/** Fractional start position within the time range, 0-1 inclusive. */
start: number;
/** Fractional end position within the time range, 0-1 inclusive. */
end: number;
};
export type CameraActivity = {
camera: string;
count: number;
hasDetections: boolean;
segments: CameraActivitySegment[];
};
export type DeleteClipType = { export type DeleteClipType = {
file: string; file: string;
exportName: string; exportName: string;

View File

@ -2,7 +2,7 @@
// eslint-disable-next-line @typescript-eslint/no-explicit-any // eslint-disable-next-line @typescript-eslint/no-explicit-any
export type FilterType = { [searchKey: string]: any }; export type FilterType = { [searchKey: string]: any };
export type ExportMode = "select" | "timeline" | "none"; export type ExportMode = "select" | "timeline" | "timeline_multi" | "none";
export type FilterList = { export type FilterList = {
labels?: string[]; labels?: string[];

View File

@ -25,6 +25,7 @@ export async function streamChatCompletion(
headers: Record<string, string>, headers: Record<string, string>,
apiMessages: { role: string; content: string }[], apiMessages: { role: string; content: string }[],
callbacks: StreamChatCallbacks, callbacks: StreamChatCallbacks,
signal?: AbortSignal,
): Promise<void> { ): Promise<void> {
const { const {
updateMessages, updateMessages,
@ -38,6 +39,7 @@ export async function streamChatCompletion(
method: "POST", method: "POST",
headers, headers,
body: JSON.stringify({ messages: apiMessages, stream: true }), body: JSON.stringify({ messages: apiMessages, stream: true }),
signal,
}); });
if (!res.ok) { if (!res.ok) {
@ -152,11 +154,15 @@ export async function streamChatCompletion(
return next; return next;
}); });
} }
} catch { } catch (err) {
onError(defaultErrorMessage); if (err instanceof DOMException && err.name === "AbortError") {
updateMessages((prev) => // User stopped generation — not an error
prev.filter((m) => !(m.role === "assistant" && m.content === "")), } else {
); onError(defaultErrorMessage);
updateMessages((prev) =>
prev.filter((m) => !(m.role === "assistant" && m.content === "")),
);
}
} finally { } finally {
onDone(); onDone();
} }
@ -191,3 +197,72 @@ export function getEventIdsFromSearchObjectsToolCalls(
} }
return results; return results;
} }
const ATTACHED_EVENT_MARKER = /^\[attached_event:([A-Za-z0-9._-]+)\]\s*\n?/;
export function parseAttachedEvent(content: string): {
eventId: string | null;
body: string;
} {
if (!content) return { eventId: null, body: content };
const match = content.match(ATTACHED_EVENT_MARKER);
if (!match) return { eventId: null, body: content };
const body = content.slice(match[0].length).replace(/^\n+/, "");
return { eventId: match[1], body };
}
export function prependAttachment(body: string, eventId: string): string {
return `[attached_event:${eventId}]\n\n${body}`;
}
export type FindSimilarObjectsResult = {
anchor: { id: string } | null;
results: { id: string; score?: number }[];
};
/**
* Parse find_similar_objects tool call response(s) into anchor + ranked results.
* Returns null if no find_similar_objects call is present so the caller can
* decide whether to render.
*/
export function getFindSimilarObjectsFromToolCalls(
toolCalls: ToolCall[] | undefined,
): FindSimilarObjectsResult | null {
if (!toolCalls?.length) return null;
for (const tc of toolCalls) {
if (tc.name !== "find_similar_objects" || !tc.response?.trim()) continue;
try {
const parsed = JSON.parse(tc.response) as {
anchor?: { id?: unknown };
results?: unknown;
};
const anchorId =
parsed.anchor && typeof parsed.anchor.id === "string"
? parsed.anchor.id
: null;
const anchor = anchorId ? { id: anchorId } : null;
const results: { id: string; score?: number }[] = [];
if (Array.isArray(parsed.results)) {
for (const item of parsed.results) {
if (
item &&
typeof item === "object" &&
"id" in item &&
typeof (item as { id: unknown }).id === "string"
) {
const entry: { id: string; score?: number } = {
id: (item as { id: string }).id,
};
const rawScore = (item as { score?: unknown }).score;
if (typeof rawScore === "number") entry.score = rawScore;
results.push(entry);
}
}
}
return { anchor, results };
} catch {
// ignore parse errors
}
}
return null;
}

View File

@ -270,7 +270,10 @@ export default function MotionSearchView({
); );
useEffect(() => { useEffect(() => {
if (exportMode !== "timeline" || exportRange) { if (
(exportMode !== "timeline" && exportMode !== "timeline_multi") ||
exportRange
) {
return; return;
} }
@ -955,9 +958,25 @@ export default function MotionSearchView({
<SaveExportOverlay <SaveExportOverlay
className="pointer-events-none absolute inset-x-0 top-0 z-30" className="pointer-events-none absolute inset-x-0 top-0 z-30"
show={exportMode === "timeline" && Boolean(exportRange)} show={
(exportMode === "timeline" || exportMode === "timeline_multi") &&
Boolean(exportRange)
}
hidePreview={exportMode === "timeline_multi"}
saveLabel={
exportMode === "timeline_multi"
? t("export.fromTimeline.useThisRange", { ns: "components/dialog" })
: undefined
}
onPreview={handleExportPreview} onPreview={handleExportPreview}
onSave={handleExportSave} onSave={() => {
if (exportMode === "timeline_multi") {
setExportMode("select");
return;
}
handleExportSave();
}}
onCancel={handleExportCancel} onCancel={handleExportCancel}
/> />
@ -976,7 +995,10 @@ export default function MotionSearchView({
noRecordingRanges={noRecordings ?? []} noRecordingRanges={noRecordings ?? []}
contentRef={contentRef} contentRef={contentRef}
onHandlebarDraggingChange={(dragging) => setScrubbing(dragging)} onHandlebarDraggingChange={(dragging) => setScrubbing(dragging)}
showExportHandles={exportMode === "timeline" && Boolean(exportRange)} showExportHandles={
(exportMode === "timeline" || exportMode === "timeline_multi") &&
Boolean(exportRange)
}
exportStartTime={exportRange?.after} exportStartTime={exportRange?.after}
exportEndTime={exportRange?.before} exportEndTime={exportRange?.before}
setExportStartTime={setExportStartTime} setExportStartTime={setExportStartTime}
@ -1408,7 +1430,11 @@ export default function MotionSearchView({
onControllerReady={(controller) => { onControllerReady={(controller) => {
mainControllerRef.current = controller; mainControllerRef.current = controller;
}} }}
isScrubbing={scrubbing || exportMode == "timeline"} isScrubbing={
scrubbing ||
exportMode == "timeline" ||
exportMode == "timeline_multi"
}
supportsFullscreen={supportsFullScreen} supportsFullscreen={supportsFullScreen}
setFullResolution={setFullResolution} setFullResolution={setFullResolution}
toggleFullscreen={toggleFullscreen} toggleFullscreen={toggleFullscreen}

View File

@ -896,6 +896,7 @@ export function RecordingView({
isScrubbing={ isScrubbing={
scrubbing || scrubbing ||
exportMode == "timeline" || exportMode == "timeline" ||
exportMode == "timeline_multi" ||
debugReplayMode == "timeline" debugReplayMode == "timeline"
} }
supportsFullscreen={supportsFullScreen} supportsFullscreen={supportsFullScreen}
@ -974,7 +975,7 @@ export function RecordingView({
activeReviewItem={activeReviewItem} activeReviewItem={activeReviewItem}
currentTime={currentTime} currentTime={currentTime}
exportRange={ exportRange={
exportMode == "timeline" exportMode == "timeline" || exportMode == "timeline_multi"
? exportRange ? exportRange
: debugReplayMode == "timeline" : debugReplayMode == "timeline"
? debugReplayRange ? debugReplayRange

View File

@ -49,6 +49,7 @@ module.exports = {
scale4: "scale4 3s ease-in-out infinite", scale4: "scale4 3s ease-in-out infinite",
"timeline-zoom-in": "timeline-zoom-in 0.3s ease-out", "timeline-zoom-in": "timeline-zoom-in 0.3s ease-out",
"timeline-zoom-out": "timeline-zoom-out 0.3s ease-out", "timeline-zoom-out": "timeline-zoom-out 0.3s ease-out",
"cursor-blink": "cursor-blink 1s step-end infinite",
}, },
aspectRatio: { aspectRatio: {
wide: "32 / 9", wide: "32 / 9",
@ -189,6 +190,10 @@ module.exports = {
"50%": { transform: "translateY(0%)", opacity: "0.5" }, "50%": { transform: "translateY(0%)", opacity: "0.5" },
"100%": { transform: "translateY(0)", opacity: "1" }, "100%": { transform: "translateY(0)", opacity: "1" },
}, },
"cursor-blink": {
"0%, 100%": { opacity: "1" },
"50%": { opacity: "0" },
},
}, },
screens: { screens: {
xs: "480px", xs: "480px",