diff --git a/docker-compose.yml b/docker-compose.yml index db63297d5..1563057bb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -14,6 +14,8 @@ services: dockerfile: docker/main/Dockerfile # Use target devcontainer-trt for TensorRT dev target: devcontainer + cache_from: + - ghcr.io/blakeblackshear/frigate:cache-amd64 ## Uncomment this block for nvidia gpu support # deploy: # resources: diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 83c8416ff..c512ceb84 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -52,6 +52,14 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ --mount=type=cache,target=/root/.ccache \ /deps/build_sqlite_vec.sh +# Build intel-media-driver from source against bookworm's system libva so it +# works with Debian 12's glibc/libstdc++ (pre-built noble/trixie packages +# require glibc 2.38 which is not available on bookworm). +FROM base AS intel-media-driver +ARG DEBIAN_FRONTEND +RUN --mount=type=bind,source=docker/main/build_intel_media_driver.sh,target=/deps/build_intel_media_driver.sh \ + /deps/build_intel_media_driver.sh + FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin @@ -200,6 +208,7 @@ RUN --mount=type=bind,source=docker/main/install_hailort.sh,target=/deps/install FROM scratch AS deps-rootfs COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/ COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/ +COPY --from=intel-media-driver /rootfs/ / COPY --from=go2rtc /rootfs/ / COPY --from=libusb-build /usr/local/lib /usr/local/lib COPY --from=tempio /rootfs/ / diff --git a/docker/main/build_intel_media_driver.sh b/docker/main/build_intel_media_driver.sh new file mode 100755 index 000000000..acc9caf09 --- /dev/null +++ b/docker/main/build_intel_media_driver.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +set -euxo pipefail + +# Intel media driver is x86_64-only. Create empty rootfs on other arches so +# the downstream COPY --from has a valid source. +if [ "$(uname -m)" != "x86_64" ]; then + mkdir -p /rootfs + exit 0 +fi + +MEDIA_DRIVER_VERSION="intel-media-25.2.6" +GMMLIB_VERSION="intel-gmmlib-22.7.2" + +apt-get -qq update +apt-get -qq install -y wget gnupg ca-certificates cmake g++ make pkg-config + +# Use Intel's jammy repo for newer libva-dev (2.22) which provides the +# VVC/VVC-decode headers required by media-driver 25.x +wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg +echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" > /etc/apt/sources.list.d/intel-gpu-jammy.list +apt-get -qq update +apt-get -qq install -y libva-dev + +# Build gmmlib (required by media-driver) +wget -qO gmmlib.tar.gz "https://github.com/intel/gmmlib/archive/refs/tags/${GMMLIB_VERSION}.tar.gz" +mkdir /tmp/gmmlib +tar -xf gmmlib.tar.gz -C /tmp/gmmlib --strip-components 1 +cmake -S /tmp/gmmlib -B /tmp/gmmlib/build -DCMAKE_BUILD_TYPE=Release +make -C /tmp/gmmlib/build -j"$(nproc)" +make -C /tmp/gmmlib/build install + +# Build intel-media-driver +wget -qO media-driver.tar.gz "https://github.com/intel/media-driver/archive/refs/tags/${MEDIA_DRIVER_VERSION}.tar.gz" +mkdir /tmp/media-driver +tar -xf media-driver.tar.gz -C /tmp/media-driver --strip-components 1 +cmake -S /tmp/media-driver -B /tmp/media-driver/build \ + -DCMAKE_BUILD_TYPE=Release \ + -DENABLE_KERNELS=ON \ + -DENABLE_NONFREE_KERNELS=ON \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DCMAKE_INSTALL_LIBDIR=/usr/lib/x86_64-linux-gnu \ + -DCMAKE_C_FLAGS="-Wno-error" \ + -DCMAKE_CXX_FLAGS="-Wno-error" +make -C /tmp/media-driver/build -j"$(nproc)" + +# Install driver to rootfs for COPY --from +make -C /tmp/media-driver/build install DESTDIR=/rootfs diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index 2dfe07d35..d54a64c52 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -91,8 +91,10 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list apt-get -qq update + # intel-media-va-driver-non-free is built from source in the + # intel-media-driver Dockerfile stage for Battlemage (Xe2) support apt-get -qq install --no-install-recommends --no-install-suggests -y \ - intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 + libmfx1 libmfxgen1 libvpl2 apt-get -qq install -y ocl-icd-libopencl1 diff --git a/docs/docs/configuration/audio_detectors.md b/docs/docs/configuration/audio_detectors.md index bb646e677..eba22ec18 100644 --- a/docs/docs/configuration/audio_detectors.md +++ b/docs/docs/configuration/audio_detectors.md @@ -119,6 +119,12 @@ audio: Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background. +:::info + +Audio transcription requires a one-time internet connection to download the Whisper or Sherpa-ONNX model on first use. Once cached, transcription runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service. #### Configuration diff --git a/docs/docs/configuration/bird_classification.md b/docs/docs/configuration/bird_classification.md index 75c0b8306..1c521314e 100644 --- a/docs/docs/configuration/bird_classification.md +++ b/docs/docs/configuration/bird_classification.md @@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath"; Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. +:::info + +Bird classification requires a one-time internet connection to download the classification model and label map from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + ## Minimum System Requirements Bird classification runs a lightweight tflite model on the CPU, there are no significantly different system requirements than running Frigate itself. diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md index 4c74bde63..636819070 100644 --- a/docs/docs/configuration/custom_classification/object_classification.md +++ b/docs/docs/configuration/custom_classification/object_classification.md @@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath"; Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API. +:::info + +Training a custom object classification model requires a one-time internet connection to download MobileNetV2 base weights. Once trained, the model runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + ## Minimum System Requirements Object classification models are lightweight and run very fast on CPU. diff --git a/docs/docs/configuration/custom_classification/state_classification.md b/docs/docs/configuration/custom_classification/state_classification.md index 688b8bb0d..8b32857d0 100644 --- a/docs/docs/configuration/custom_classification/state_classification.md +++ b/docs/docs/configuration/custom_classification/state_classification.md @@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath"; State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate//classification/` MQTT topic and in Home Assistant sensors via the official Frigate integration. +:::info + +Training a custom state classification model requires a one-time internet connection to download MobileNetV2 base weights. Once trained, the model runs fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + ## Minimum System Requirements State classification models are lightweight and run very fast on CPU. diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index 7c23884cc..035e4f4e8 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -9,6 +9,12 @@ import NavPath from "@site/src/components/NavPath"; Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications. +:::info + +Face recognition requires a one-time internet connection to download detection and embedding models from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + ## Model Requirements ### Face Detection diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index e39f88a02..a02a313ba 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -193,6 +193,12 @@ To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` env Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers. +:::info + +Cloud Generative AI providers require an active internet connection to send images and prompts for processing. Local providers like llama.cpp and Ollama (with local models) do not require internet. See [Network Requirements](/frigate/network_requirements#generative-ai) for details. + +::: + ### Ollama Cloud Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud). diff --git a/docs/docs/configuration/hardware_acceleration_video.md b/docs/docs/configuration/hardware_acceleration_video.md index 918c23e67..7aeecfda9 100644 --- a/docs/docs/configuration/hardware_acceleration_video.md +++ b/docs/docs/configuration/hardware_acceleration_video.md @@ -59,13 +59,14 @@ Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video **Recommended hwaccel Preset** -| CPU Generation | Intel Driver | Recommended Preset | Notes | -| -------------- | ------------ | ------------------- | ------------------------------------------- | -| gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 | -| gen6 - gen7 | iHD | preset-vaapi | qsv is not supported | -| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used | -| gen13+ | iHD / Xe | preset-intel-qsv-\* | | -| Intel Arc GPU | iHD / Xe | preset-intel-qsv-\* | | +| CPU Generation | Intel Driver | Recommended Preset | Notes | +| ------------------ | ------------ | ------------------- | ------------------------------------------- | +| gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 | +| gen6 - gen7 | iHD | preset-vaapi | qsv is not supported | +| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used | +| gen13+ | iHD / Xe | preset-intel-qsv-\* | | +| Intel Arc A-series | iHD / Xe | preset-intel-qsv-\* | | +| Intel Arc B-series | iHD / Xe | preset-intel-qsv-\* | Requires host kernel 6.12+ | ::: diff --git a/docs/docs/configuration/license_plate_recognition.md b/docs/docs/configuration/license_plate_recognition.md index 017cc5e16..c60618fd4 100644 --- a/docs/docs/configuration/license_plate_recognition.md +++ b/docs/docs/configuration/license_plate_recognition.md @@ -11,6 +11,12 @@ Frigate can recognize license plates on vehicles and automatically add the detec LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition. +:::info + +License plate recognition requires a one-time internet connection to download OCR and detection models from GitHub. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + When a plate is recognized, the details are: - Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object. diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index 18e2054c4..5749379c6 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -21,6 +21,12 @@ The jsmpeg live view will use more browser and client GPU resources. Using go2rt | mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. | | webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. | +:::info + +WebRTC may use an external STUN server for NAT traversal. MSE and HLS streaming do not require any internet access. See [Network Requirements](/frigate/network_requirements#webrtc-stun) for details. + +::: + ### Camera Settings Recommendations If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view: diff --git a/docs/docs/configuration/notifications.md b/docs/docs/configuration/notifications.md index 0ba84b8aa..cc9a7769f 100644 --- a/docs/docs/configuration/notifications.md +++ b/docs/docs/configuration/notifications.md @@ -11,6 +11,12 @@ import NavPath from "@site/src/components/NavPath"; Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption. +:::info + +Push notifications require internet access from the Frigate server to the browser vendor's push service (e.g., Google FCM, Mozilla autopush). See [Network Requirements](/frigate/network_requirements#push-notifications) for details. + +::: + ## Setting up Notifications In order to use notifications the following requirements must be met: diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 53aee4747..2821fb7a2 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -288,6 +288,12 @@ This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleratio See the [installation docs](../frigate/installation.md#hailo-8) for information on configuring the Hailo hardware. +:::info + +If no custom model is provided, the Hailo detector downloads a default model from the Hailo Model Zoo on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details. + +::: + ### Configuration When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**. @@ -1793,6 +1799,12 @@ Hardware accelerated object detection is supported on the following SoCs: This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2. +:::info + +If no custom model is provided, the RKNN detector downloads a default model from GitHub on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details. + +::: + :::tip When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming NPU resources are available. An example configuration would be: @@ -2176,6 +2188,12 @@ This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AX See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware. +:::info + +The AXEngine detector downloads its default model from HuggingFace on first startup. Once cached, the model works fully offline. See [Network Requirements](/frigate/network_requirements#hardware-specific-detector-models) for details. + +::: + ### Configuration When configuring the AXEngine detector, you have to specify the model name. diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 043a9d0af..614beafed 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -281,31 +281,52 @@ Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only reco Footage can be exported from Frigate by right-clicking (desktop) or long pressing (mobile) on a review item in the Review pane or by clicking the Export button in the History view. Exported footage is then organized and searchable through the Export view, accessible from the main navigation bar. -### Time-lapse export +### Custom export with FFmpeg arguments -Time lapse exporting is available only via the [HTTP API](../integrations/api/export-recording-export-camera-name-start-start-time-end-end-time-post.api.mdx). +For advanced use cases, the [custom export HTTP API](../integrations/api/export-recording-custom-export-custom-camera-name-start-start-time-end-end-time-post.api.mdx) lets you pass custom FFmpeg arguments when exporting a recording: -When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS. - -To configure the speed-up factor, the frame rate and further custom settings, use the `timelapse_args` parameter. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS: - -```yaml {3-4} -record: - enabled: True - export: - timelapse_args: "-vf setpts=PTS/60 -r 25" +``` +POST /export/custom/{camera_name}/start/{start_time}/end/{end_time} ``` -:::tip +The request body accepts `ffmpeg_input_args` and `ffmpeg_output_args` to control encoding, frame rate, filters, and other FFmpeg options. If neither is provided, Frigate defaults to time-lapse output settings (25x speed, 30 FPS). -When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set the camera-level export hwaccel_args with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). +The following example exports a time-lapse at 60x speed with 25 FPS: + +```json +{ + "name": "Front Door Time-lapse", + "ffmpeg_output_args": "-vf setpts=PTS/60 -r 25" +} +``` + +#### CPU fallback + +If hardware acceleration is configured and the export fails (e.g., the GPU is unavailable), set `cpu_fallback: true` in the request body to automatically retry using software encoding. + +```json +{ + "name": "My Export", + "ffmpeg_output_args": "-c:v libx264 -crf 23", + "cpu_fallback": true +} +``` + +:::note + +Non-admin users are restricted from using FFmpeg arguments that can access the filesystem (e.g., `-filter_complex`, file paths, and protocol references). Admin users have full control over FFmpeg arguments. ::: :::tip -The encoder determines its own behavior so the resulting file size may be undesirably large. -To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. +When `hwaccel_args` is configured, hardware encoding is used for exports. This can be overridden per camera (e.g., when camera resolution exceeds hardware encoder limits) by setting a camera-level `hwaccel_args`. Using an unrecognized value or empty string falls back to software encoding (libx264). + +::: + +:::tip + +To reduce output file size, add the FFmpeg parameter `-qp n` to `ffmpeg_output_args` (where `n` is the quantization parameter). Adjust the value to balance quality and file size for your scenario. ::: diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md index 49e0db88a..b2c5d1639 100644 --- a/docs/docs/configuration/semantic_search.md +++ b/docs/docs/configuration/semantic_search.md @@ -13,6 +13,12 @@ Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and Semantic Search is accessed via the _Explore_ view in the Frigate UI. +:::info + +Semantic search requires a one-time internet connection to download embedding models from HuggingFace. Once cached, models work fully offline. See [Network Requirements](/frigate/network_requirements#one-time-model-downloads) for details. + +::: + ## Minimum System Requirements Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all. diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index afbd95aaf..7df2ae0bb 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -146,17 +146,11 @@ A single Coral can handle many cameras using the default model and will be suffi The OpenVINO detector type is able to run on: - 6th Gen Intel Platforms and newer that have an iGPU -- x86 hosts with an Intel Arc GPU +- x86 hosts with an Intel Arc GPU (including Arc A-series and B-series Battlemage) - Intel NPUs - Most modern AMD CPUs (though this is officially not supported by Intel) - x86 & Arm64 hosts via CPU (generally not recommended) -:::note - -Intel B-series (Battlemage) GPUs are not officially supported with Frigate 0.17, though a user has [provided steps to rebuild the Frigate container](https://github.com/blakeblackshear/frigate/discussions/21257) with support for them. - -::: - More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below: diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index 2f2e55fa0..5d228a609 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -482,7 +482,8 @@ services: - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder - /dev/video11:/dev/video11 # For Raspberry Pi 4B - /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware - - /dev/accel:/dev/accel # Intel NPU + - /dev/kfd:/dev/kfd # AMD Kernel Fusion Driver for ROCm + - /dev/accel:/dev/accel # AMD / Intel NPU volumes: - /etc/localtime:/etc/localtime:ro - /path/to/your/config:/config diff --git a/docs/docs/frigate/network_requirements.md b/docs/docs/frigate/network_requirements.md new file mode 100644 index 000000000..49d64272e --- /dev/null +++ b/docs/docs/frigate/network_requirements.md @@ -0,0 +1,155 @@ +--- +id: network_requirements +title: Network Requirements +--- + +# Network Requirements + +Frigate is designed to run locally and does not require a persistent internet connection for core functionality. However, certain features need internet access for initial setup or ongoing operation. This page describes what connects to the internet, when, and how to control it. + +## How Frigate Uses the Internet + +Frigate's internet usage falls into three categories: + +1. **One-time model downloads** — ML models are downloaded the first time a feature is enabled, then cached locally. No internet is needed on subsequent startups. +2. **Optional cloud services** — Features like Frigate+ and Generative AI connect to external APIs only when explicitly configured. +3. **Build-time dependencies** — Components bundled into the Docker image during the build process. These require no internet at runtime. + +:::tip + +After initial setup, Frigate can run fully offline as long as all required models have been downloaded and no cloud-dependent features are enabled. + +::: + +## One-Time Model Downloads + +The following models are downloaded automatically the first time their associated feature is enabled. Once cached in `/config/model_cache/`, they do not require internet again. + +| Feature | Models Downloaded | Source | +| --------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------- | +| [Semantic search](/configuration/semantic_search) | Jina CLIP v1 or v2 (ONNX) + tokenizer | HuggingFace | +| [Face recognition](/configuration/face_recognition) | FaceNet, ArcFace, face detection model | GitHub | +| [License plate recognition](/configuration/license_plate_recognition) | PaddleOCR (detection, classification, recognition) + YOLOv9 plate detector | GitHub | +| [Bird classification](/configuration/bird_classification) | MobileNetV2 bird model + label map | GitHub | +| [Custom classification](/configuration/custom_classification/state_classification) (training) | MobileNetV2 ImageNet base weights (via Keras) | Google storage | +| [Audio transcription](/configuration/advanced) | Whisper or Sherpa-ONNX streaming model | HuggingFace / OpenAI | + +### Hardware-Specific Detector Models + +If you are using one of the following hardware detectors and have not provided your own model file, a default model will be downloaded on first startup: + +| Detector | Model Downloaded | Source | +| ------------------------------------------------------------------ | -------------------- | ------------------------ | +| [Rockchip RKNN](/configuration/object_detectors#rockchip-platform) | RKNN detection model | GitHub | +| [Hailo 8 / 8L](/configuration/object_detectors#hailo-8) | YOLOv6n (.hef) | Hailo Model Zoo (AWS S3) | +| [AXERA AXEngine](/configuration/object_detectors) | Detection model | HuggingFace | + +:::note + +The default CPU, EdgeTPU, and OpenVINO object detection models are bundled into the Docker image and do not require any download at runtime. + +::: + +### Preventing Model Downloads + +If you have already downloaded all required models and want to prevent Frigate from attempting any outbound connections to HuggingFace or the Transformers library, set the following environment variables on your Frigate container: + +```yaml +environment: + HF_HUB_OFFLINE: "1" + TRANSFORMERS_OFFLINE: "1" +``` + +:::warning + +Setting these variables without having the correct model files already cached in `/config/model_cache/` will cause failures. Only use these after a successful initial setup with internet access. + +::: + +### Mirror Support + +If your Frigate instance has restricted internet access, you can point model downloads at internal mirrors using environment variables: + +| Environment Variable | Default | Used By | +| ----------------------------------- | ----------------------------------- | --------------------------------------------- | +| `HF_ENDPOINT` | `https://huggingface.co` | Semantic search, Sherpa-ONNX, AXEngine models | +| `GITHUB_ENDPOINT` | `https://github.com` | Face recognition, LPR, RKNN models | +| `GITHUB_RAW_ENDPOINT` | `https://raw.githubusercontent.com` | Bird classification | +| `TF_KERAS_MOBILENET_V2_WEIGHTS_URL` | Google storage (Keras default) | Custom classification training | + +## Optional Cloud Services + +These features connect to external services during normal operation and require internet whenever they are active. + +### Frigate+ + +When a Frigate+ API key is configured, Frigate communicates with `https://api.frigate.video` to download models, upload snapshots for training, submit annotations, and report false positives. Remove the API key to disable all Frigate+ network activity. + +See [Frigate+](/integrations/plus) for details. + +### Generative AI + +When a Generative AI provider is configured, Frigate sends images and prompts to the configured provider for event descriptions, chat, and camera monitoring. Available providers: + +| Provider | Internet Required | +| ------------- | ---------------------------------------------------------------- | +| OpenAI | Yes — connects to OpenAI API (or custom base URL) | +| Google Gemini | Yes — connects to Google Generative AI API | +| Azure OpenAI | Yes — connects to your Azure endpoint | +| Ollama | Depends — typically local (`localhost:11434`), but can be remote | +| llama.cpp | No — runs entirely locally | + +Disable Generative AI by removing the `genai` configuration from your cameras. See [Generative AI](/configuration/genai/genai_config) for details. + +### Version Check + +Frigate checks GitHub for the latest release version on startup by querying `https://api.github.com`. This can be disabled: + +```yaml +telemetry: + version_check: false +``` + +### Push Notifications + +When [notifications](/configuration/notifications) are enabled and users have registered for push notifications in the web UI, Frigate sends push messages through the browser vendor's push service (e.g., Google FCM, Mozilla autopush). This requires internet access from the Frigate server to these push endpoints. + +### MQTT + +If an [MQTT broker](/integrations/mqtt) is configured, Frigate maintains a connection to the broker's host and port. This is typically a local network connection, but will require internet if you use a cloud-hosted MQTT broker. + +### DeepStack / CodeProject.AI + +When using the [DeepStack detector plugin](/configuration/object_detectors), Frigate sends images to the configured API endpoint for inference. This is typically local but depends on where the service is hosted. + +## WebRTC (STUN) + +For [WebRTC live streaming](/configuration/live), Frigate uses STUN for NAT traversal: + +- **go2rtc** defaults to a local STUN listener (`stun:8555`) — no internet required. +- **The web UI's WebRTC player** includes a fallback to Google's public STUN server (`stun:stun.l.google.com:19302`), which requires internet. + +## Home Assistant Supervisor + +When running as a Home Assistant add-on, the go2rtc startup script queries the local Supervisor API (`http://supervisor/`) to discover the host IP address and WebRTC port. This is a local network call to the Home Assistant host, not an internet connection. + +## What Does NOT Require Internet + +- **Object detection** — CPU, EdgeTPU, OpenVINO, and other bundled detector models are included in the Docker image. +- **Recording and playback** — All video is stored and served locally. +- **Live streaming** — Camera streams are pulled over your local network. MSE and HLS streaming work without any external connections. +- **The web interface** — Fully self-contained with no external fonts, scripts, analytics, or CDN dependencies. All translations are bundled locally. +- **Custom classification inference** — After training, custom models run entirely locally. +- **Audio detection** — The YAMNet audio classification model is bundled in the Docker image. + +## Running Frigate Offline + +To run Frigate in an air-gapped or offline environment: + +1. **Pre-download models** — Start Frigate with internet access once with all desired features enabled. Models will be cached in `/config/model_cache/`. +2. **Disable version check** — Set `telemetry.version_check: false` in your configuration. +3. **Block outbound model requests** — Set the `HF_HUB_OFFLINE=1` and `TRANSFORMERS_OFFLINE=1` environment variables to prevent HuggingFace and Transformers from attempting any network requests. +4. **Avoid cloud features** — Do not configure Frigate+, Generative AI providers that require internet, or cloud MQTT brokers. +5. **Use local model mirrors** — If limited internet is available, set the `HF_ENDPOINT`, `GITHUB_ENDPOINT`, and `GITHUB_RAW_ENDPOINT` environment variables to point to local mirrors. + +After these steps, Frigate will operate with no outbound internet connections. diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md index 49d1a20e8..28c178d1f 100644 --- a/docs/docs/integrations/mqtt.md +++ b/docs/docs/integrations/mqtt.md @@ -5,6 +5,12 @@ title: MQTT These are the MQTT messages generated by Frigate. The default topic_prefix is `frigate`, but can be changed in the config file. +:::info + +MQTT requires a network connection to your broker. This is typically local, but will require internet if using a cloud-hosted MQTT broker. See [Network Requirements](/frigate/network_requirements#mqtt) for details. + +::: + ## General Frigate Topics ### `frigate/available` diff --git a/docs/docs/integrations/plus.md b/docs/docs/integrations/plus.md index aa3d78df5..9783cb212 100644 --- a/docs/docs/integrations/plus.md +++ b/docs/docs/integrations/plus.md @@ -5,6 +5,12 @@ title: Frigate+ For more information about how to use Frigate+ to improve your model, see the [Frigate+ docs](/plus/). +:::info + +Frigate+ requires an active internet connection to communicate with `https://api.frigate.video` for model downloads, image uploads, and annotations. See [Network Requirements](/frigate/network_requirements#frigate) for details. + +::: + ## Setup ### Create an account diff --git a/docs/docs/troubleshooting/recordings.md b/docs/docs/troubleshooting/recordings.md index b1f180a82..2425e653a 100644 --- a/docs/docs/troubleshooting/recordings.md +++ b/docs/docs/troubleshooting/recordings.md @@ -80,3 +80,85 @@ Some users found that mounting a drive via `fstab` with the `sync` option caused #### Copy Times < 1 second If the storage is working quickly then this error may be caused by CPU load on the machine being too high for Frigate to have the resources to keep up. Try temporarily shutting down other services to see if the issue improves. + +## I see the message: WARNING : Too many unprocessed recording segments in cache for camera. This likely indicates an issue with the detect stream... + +This warning means that the detect stream for the affected camera has fallen behind or stopped processing frames. Frigate's recording cache holds segments waiting to be analyzed by the detector — when more than 6 segments pile up without being processed, Frigate discards the oldest ones to prevent the cache from filling up. + +:::warning + +This error is a **symptom**, not the root cause. The actual cause is always logged **before** these messages start appearing. You must review the full logs from Frigate startup through the first occurrence of this warning to identify the real issue. + +::: + +### Step 1: Get the full logs + +Collect complete Frigate logs from startup through the first occurrence of the error. Look for errors or warnings that appear **before** the "Too many unprocessed" messages begin — that is where the root cause will be found. + +### Step 2: Check the cache directory + +Exec into the Frigate container and inspect the recording cache: + +``` +docker exec -it frigate ls -la /tmp/cache +``` + +Each camera should have a small number of `.mp4` segment files. If one camera has significantly more files than others, that camera is the source of the problem. A problem with a single camera can cascade and cause all cameras to show this error. + +### Step 3: Verify segment duration + +Recording segments should be approximately 10 seconds long. Run `ffprobe` on segments in the cache to check: + +``` +docker exec -it frigate ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1 /tmp/cache/@.mp4 +``` + +If segments are only ~1 second instead of ~10 seconds, the camera is sending corrupt timestamp data, causing segments to be split too frequently and filling the cache 10x faster than expected. + +**Common causes of short segments:** + +- **"Smart Codec" or "Smart+" enabled on the camera** — These features dynamically change encoding parameters mid-stream, which corrupts timestamps. Disable them in your camera's settings. +- **Changing codec, bitrate, or resolution mid-stream** — Any encoding changes during an active stream can cause unpredictable segment splitting. +- **Camera firmware bugs** — Check for firmware updates from your camera manufacturer. + +### Step 4: Check for a stuck detector + +If the detect stream is not processing frames, segments will accumulate. Common causes: + +- **Detection resolution too high** — Use a substream for detection, not the full resolution main stream. +- **Detection FPS too high** — 5 fps is the recommended maximum for detection. +- **Model too large** — Use smaller model variants (e.g., YOLO `s` or `t` size, not `e` or `x`). Use 320x320 input size rather than 640x640 unless you have a powerful dedicated detector. +- **Virtualization** — Running Frigate in a VM (especially Proxmox) can cause the detector to hang or stall. This is a known issue with GPU/TPU passthrough in virtualized environments and is not something Frigate can fix. Running Frigate in Docker on bare metal is recommended. + +### Step 5: Check for GPU hangs + +On the host machine, check `dmesg` for GPU-related errors: + +``` +dmesg | grep -i -E "gpu|drm|reset|hang" +``` + +Messages like `trying reset from guc_exec_queue_timedout_job` or similar GPU reset/hang messages indicate a driver or hardware issue. Ensure your kernel and GPU drivers (especially Intel) are up to date. + +### Step 6: Verify hardware acceleration configuration + +An incorrect `hwaccel_args` preset can cause ffmpeg to fail silently or consume excessive CPU, starving the detector of resources. + +- After upgrading Frigate, verify your preset matches your hardware (e.g., `preset-intel-qsv-h264` instead of the deprecated `preset-vaapi`). +- For h265 cameras, use the corresponding h265 preset (e.g., `preset-intel-qsv-h265`). +- Note that `hwaccel_args` are only relevant for the detect stream — Frigate does not decode the record stream. + +### Step 7: Verify go2rtc stream configuration + +Ensure that the ffmpeg source names in your go2rtc configuration match the correct camera stream. A misconfigured stream name (e.g., copying a config from one camera to another without updating the stream reference) will cause the wrong stream to be used or the stream to fail entirely. + +### Step 8: Check system resources + +If none of the above apply, the issue may be a general resource constraint. Monitor the following on your host: + +- **CPU usage** — An overloaded CPU can prevent the detector from keeping up. +- **RAM and swap** — Excessive swapping dramatically slows all I/O operations. +- **Disk I/O** — Use `iotop` or `iostat` to check for saturation. +- **Storage space** — Verify you have free space on the Frigate storage volume (check the Storage page in the Frigate UI). + +Try temporarily disabling resource-intensive features like `genai` and `face_recognition` to see if the issue resolves. This can help isolate whether the detector is being starved of resources. diff --git a/docs/sidebars.ts b/docs/sidebars.ts index 2d1085af1..adc3bc1e1 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -12,6 +12,7 @@ const sidebars: SidebarsConfig = { "frigate/updating", "frigate/camera_setup", "frigate/video_pipeline", + "frigate/network_requirements", "frigate/glossary", ], Guides: [ diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index 90fa505ec..60621ff4e 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -2724,6 +2724,135 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" + /exports/batch: + post: + tags: + - Export + summary: Start recording export batch + description: >- + Starts recording exports for a batch of items, each with its own camera + and time range. Optionally assigns them to a new or existing export case. + When neither export_case_id nor new_case_name is provided, exports are + added as uncategorized. Attaching to an existing case is admin-only. + operationId: export_recordings_batch_exports_batch_post + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/BatchExportBody" + responses: + "202": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/BatchExportResponse" + "400": + description: Bad Request + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "403": + description: Forbidden + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "404": + description: Not Found + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "503": + description: Service Unavailable + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + /exports/delete: + post: + tags: + - Export + summary: Bulk delete exports + description: >- + Deletes one or more exports by ID. All IDs must exist and none can be + in-progress. Admin-only. + operationId: bulk_delete_exports_exports_delete_post + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ExportBulkDeleteBody" + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "400": + description: Bad Request - one or more exports are in-progress + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "404": + description: Not Found - one or more export IDs do not exist + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" + /exports/reassign: + post: + tags: + - Export + summary: Bulk reassign exports to a case + description: >- + Assigns or unassigns one or more exports to/from a case. All IDs must + exist. Pass export_case_id as null to unassign (move to uncategorized). + Admin-only. + operationId: bulk_reassign_exports_exports_reassign_post + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ExportBulkReassignBody" + responses: + "200": + description: Successful Response + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "404": + description: Not Found - one or more export IDs or the target case do not exist + content: + application/json: + schema: + $ref: "#/components/schemas/GenericResponse" + "422": + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/HTTPValidationError" /cases: get: tags: @@ -2853,39 +2982,6 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" - "/export/{export_id}/case": - patch: - tags: - - Export - summary: Assign export to case - description: "Assigns an export to a case, or unassigns it if export_case_id is null." - operationId: assign_export_case_export__export_id__case_patch - parameters: - - name: export_id - in: path - required: true - schema: - type: string - title: Export Id - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ExportCaseAssignBody" - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/GenericResponse" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" "/export/{camera_name}/start/{start_time}/end/{end_time}": post: tags: @@ -2973,32 +3069,6 @@ paths: application/json: schema: $ref: "#/components/schemas/HTTPValidationError" - "/export/{event_id}": - delete: - tags: - - Export - summary: Delete export - operationId: export_delete_export__event_id__delete - parameters: - - name: event_id - in: path - required: true - schema: - type: string - title: Event Id - responses: - "200": - description: Successful Response - content: - application/json: - schema: - $ref: "#/components/schemas/GenericResponse" - "422": - description: Validation Error - content: - application/json: - schema: - $ref: "#/components/schemas/HTTPValidationError" "/export/custom/{camera_name}/start/{start_time}/end/{end_time}": post: tags: @@ -6501,6 +6571,149 @@ components: required: - recognizedLicensePlate title: EventsLPRBody + BatchExportBody: + properties: + items: + items: + $ref: "#/components/schemas/BatchExportItem" + type: array + minItems: 1 + maxItems: 50 + title: Items + description: List of export items. Each item has its own camera and time range. + export_case_id: + anyOf: + - type: string + maxLength: 30 + - type: "null" + title: Export case ID + description: Existing export case ID to assign all exports to. Attaching to an existing case is temporarily admin-only until case-level ACLs exist. + new_case_name: + anyOf: + - type: string + maxLength: 100 + - type: "null" + title: New case name + description: Name of a new export case to create when export_case_id is omitted + new_case_description: + anyOf: + - type: string + - type: "null" + title: New case description + description: Optional description for a newly created export case + type: object + required: + - items + title: BatchExportBody + BatchExportItem: + properties: + camera: + type: string + title: Camera name + start_time: + type: number + title: Start time + end_time: + type: number + title: End time + image_path: + anyOf: + - type: string + - type: "null" + title: Existing thumbnail path + description: Optional existing image to use as the export thumbnail + friendly_name: + anyOf: + - type: string + maxLength: 256 + - type: "null" + title: Friendly name + description: Optional friendly name for this specific export item + client_item_id: + anyOf: + - type: string + maxLength: 128 + - type: "null" + title: Client item ID + description: Optional opaque client identifier echoed back in results + type: object + required: + - camera + - start_time + - end_time + title: BatchExportItem + BatchExportResponse: + properties: + export_case_id: + anyOf: + - type: string + - type: "null" + title: Export Case Id + description: Export case ID associated with the batch + export_ids: + items: + type: string + type: array + title: Export Ids + description: Export IDs successfully queued + results: + items: + $ref: "#/components/schemas/BatchExportResultModel" + type: array + title: Results + description: Per-item batch export results + type: object + required: + - export_ids + - results + title: BatchExportResponse + description: Response model for starting an export batch. + BatchExportResultModel: + properties: + camera: + type: string + title: Camera + description: Camera name for this export attempt + export_id: + anyOf: + - type: string + - type: "null" + title: Export Id + description: The export ID when the export was successfully queued + success: + type: boolean + title: Success + description: Whether the export was successfully queued + status: + anyOf: + - type: string + - type: "null" + title: Status + description: Queue status for this camera export + error: + anyOf: + - type: string + - type: "null" + title: Error + description: Validation or queueing error for this item, if any + item_index: + anyOf: + - type: integer + - type: "null" + title: Item Index + description: Zero-based index of this result within the request items list + client_item_id: + anyOf: + - type: string + - type: "null" + title: Client Item Id + description: Opaque client-supplied item identifier echoed from the request + type: object + required: + - camera + - success + title: BatchExportResultModel + description: Per-item result for a batch export request. EventsSubLabelBody: properties: subLabel: @@ -6523,18 +6736,41 @@ components: required: - subLabel title: EventsSubLabelBody - ExportCaseAssignBody: + ExportBulkDeleteBody: properties: + ids: + items: + type: string + minLength: 1 + type: array + minItems: 1 + title: Ids + type: object + required: + - ids + title: ExportBulkDeleteBody + description: Request body for bulk deleting exports. + ExportBulkReassignBody: + properties: + ids: + items: + type: string + minLength: 1 + type: array + minItems: 1 + title: Ids export_case_id: anyOf: - type: string maxLength: 30 - type: "null" title: Export Case Id - description: "Case ID to assign to the export, or null to unassign" + description: "Case ID to assign to, or null to unassign from current case" type: object - title: ExportCaseAssignBody - description: Request body for assigning or unassigning an export to a case. + required: + - ids + title: ExportBulkReassignBody + description: Request body for bulk reassigning exports to a case. ExportCaseCreateBody: properties: name: diff --git a/frigate/api/auth.py b/frigate/api/auth.py index d3d5a9c5d..d1c968818 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -64,6 +64,7 @@ def require_admin_by_default(): "/logout", # Authenticated user endpoints (allow_any_authenticated) "/profile", + "/profiles", # Public info endpoints (allow_public) "/", "/version", @@ -87,7 +88,9 @@ def require_admin_by_default(): "/go2rtc/streams", "/event_ids", "/events", + "/cases", "/exports", + "/jobs/export", } # Path prefixes that should be exempt (for paths with parameters) @@ -100,7 +103,9 @@ def require_admin_by_default(): "/go2rtc/streams/", # /go2rtc/streams/{camera} "/users/", # /users/{username}/password (has own auth) "/preview/", # /preview/{file}/thumbnail.jpg + "/cases/", # /cases/{case_id} "/exports/", # /exports/{export_id} + "/jobs/export/", # /jobs/export/{export_id} "/vod/", # /vod/{camera_name}/... "/notifications/", # /notifications/pubkey, /notifications/register ) diff --git a/frigate/api/chat.py b/frigate/api/chat.py index 978fa6334..0543d5f8a 100644 --- a/frigate/api/chat.py +++ b/frigate/api/chat.py @@ -3,9 +3,11 @@ import base64 import json import logging +import operator import time from datetime import datetime -from typing import Any, Dict, Generator, List, Optional +from functools import reduce +from typing import Any, Dict, List, Optional import cv2 from fastapi import APIRouter, Body, Depends, Request @@ -17,6 +19,14 @@ from frigate.api.auth import ( get_allowed_cameras_for_filter, require_camera_access, ) +from frigate.api.chat_util import ( + chunk_content, + distance_to_score, + format_events_with_local_time, + fuse_scores, + hydrate_event, + parse_iso_to_timestamp, +) from frigate.api.defs.query.events_query_parameters import EventsQueryParams from frigate.api.defs.request.chat_body import ChatCompletionRequest from frigate.api.defs.response.chat_response import ( @@ -32,55 +42,13 @@ from frigate.jobs.vlm_watch import ( start_vlm_watch_job, stop_vlm_watch_job, ) +from frigate.models import Event logger = logging.getLogger(__name__) router = APIRouter(tags=[Tags.chat]) -def _chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]: - """Yield content in word-aware chunks for streaming.""" - if not content: - return - words = content.split(" ") - current: List[str] = [] - current_len = 0 - for w in words: - current.append(w) - current_len += len(w) + 1 - if current_len >= chunk_size: - yield " ".join(current) + " " - current = [] - current_len = 0 - if current: - yield " ".join(current) - - -def _format_events_with_local_time( - events_list: List[Dict[str, Any]], -) -> List[Dict[str, Any]]: - """Add human-readable local start/end times to each event for the LLM.""" - result = [] - for evt in events_list: - if not isinstance(evt, dict): - result.append(evt) - continue - copy_evt = dict(evt) - try: - start_ts = evt.get("start_time") - end_ts = evt.get("end_time") - if start_ts is not None: - dt_start = datetime.fromtimestamp(start_ts) - copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p") - if end_ts is not None: - dt_end = datetime.fromtimestamp(end_ts) - copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p") - except (TypeError, ValueError, OSError): - pass - result.append(copy_evt) - return result - - class ToolExecuteRequest(BaseModel): """Request model for tool execution.""" @@ -158,6 +126,76 @@ def get_tool_definitions() -> List[Dict[str, Any]]: "required": [], }, }, + { + "type": "function", + "function": { + "name": "find_similar_objects", + "description": ( + "Find tracked objects that are visually and semantically similar " + "to a specific past event. Use this when the user references a " + "particular object they have seen and wants to find other " + "sightings of the same or similar one ('that green car', 'the " + "person in the red jacket', 'the package that was delivered'). " + "Prefer this over search_objects whenever the user's intent is " + "'find more like this specific one.' Use search_objects first " + "only if you need to locate the anchor event. Requires semantic " + "search to be enabled." + ), + "parameters": { + "type": "object", + "properties": { + "event_id": { + "type": "string", + "description": "The id of the anchor event to find similar objects to.", + }, + "after": { + "type": "string", + "description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').", + }, + "before": { + "type": "string", + "description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').", + }, + "cameras": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional list of cameras to restrict to. Defaults to all.", + }, + "labels": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional list of labels to restrict to. Defaults to the anchor event's label.", + }, + "sub_labels": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional list of sub_labels (names) to restrict to.", + }, + "zones": { + "type": "array", + "items": {"type": "string"}, + "description": "Optional list of zones. An event matches if any of its zones overlap.", + }, + "similarity_mode": { + "type": "string", + "enum": ["visual", "semantic", "fused"], + "description": "Which similarity signal(s) to use. 'fused' (default) combines visual and semantic.", + "default": "fused", + }, + "min_score": { + "type": "number", + "description": "Drop matches with a similarity score below this threshold (0.0-1.0).", + }, + "limit": { + "type": "integer", + "description": "Maximum number of matches to return (default: 10).", + "default": 10, + }, + }, + "required": ["event_id"], + }, + }, + }, { "type": "function", "function": { @@ -434,6 +472,166 @@ async def _execute_search_objects( ) +async def _execute_find_similar_objects( + request: Request, + arguments: Dict[str, Any], + allowed_cameras: List[str], +) -> Dict[str, Any]: + """Execute the find_similar_objects tool. + + Returns a plain dict (not JSONResponse) so the chat loop can embed it + directly in tool-result messages. + """ + # 1. Semantic search enabled? + config = request.app.frigate_config + if not getattr(config.semantic_search, "enabled", False): + return { + "error": "semantic_search_disabled", + "message": ( + "Semantic search must be enabled to find similar objects. " + "Enable it in the Frigate config under semantic_search." + ), + } + + context = request.app.embeddings + if context is None: + return { + "error": "semantic_search_disabled", + "message": "Embeddings context is not available.", + } + + # 2. Anchor lookup. + event_id = arguments.get("event_id") + if not event_id: + return {"error": "missing_event_id", "message": "event_id is required."} + + try: + anchor = Event.get(Event.id == event_id) + except Event.DoesNotExist: + return { + "error": "anchor_not_found", + "message": f"Could not find event {event_id}.", + } + + # 3. Parse params. + after = parse_iso_to_timestamp(arguments.get("after")) + before = parse_iso_to_timestamp(arguments.get("before")) + + cameras = arguments.get("cameras") + if cameras: + # Respect RBAC: intersect with the user's allowed cameras. + cameras = [c for c in cameras if c in allowed_cameras] + else: + cameras = list(allowed_cameras) if allowed_cameras else None + + labels = arguments.get("labels") or [anchor.label] + sub_labels = arguments.get("sub_labels") + zones = arguments.get("zones") + + similarity_mode = arguments.get("similarity_mode", "fused") + if similarity_mode not in ("visual", "semantic", "fused"): + similarity_mode = "fused" + + min_score = arguments.get("min_score") + limit = int(arguments.get("limit", 10)) + limit = max(1, min(limit, 50)) + + # 4. Run similarity searches. We deliberately do NOT pass event_ids into + # the vec queries — the IN filter on sqlite-vec is broken in the installed + # version (see frigate/embeddings/__init__.py). Mirror the pattern used by + # frigate/api/event.py events_search: fetch top-k globally, then intersect + # with the structured filters via Peewee. + visual_distances: Dict[str, float] = {} + description_distances: Dict[str, float] = {} + + try: + if similarity_mode in ("visual", "fused"): + rows = context.search_thumbnail(anchor) + visual_distances = {row[0]: row[1] for row in rows} + + if similarity_mode in ("semantic", "fused"): + query_text = ( + (anchor.data or {}).get("description") + or anchor.sub_label + or anchor.label + ) + rows = context.search_description(query_text) + description_distances = {row[0]: row[1] for row in rows} + except Exception: + logger.exception("Similarity search failed") + return { + "error": "similarity_search_failed", + "message": "Failed to run similarity search.", + } + + vec_ids = set(visual_distances) | set(description_distances) + vec_ids.discard(anchor.id) + # vec layer returns up to k=100 per modality; flag when we hit that ceiling + # so the LLM can mention there may be more matches beyond what we saw. + candidate_truncated = ( + len(visual_distances) >= 100 or len(description_distances) >= 100 + ) + + if not vec_ids: + return { + "anchor": hydrate_event(anchor), + "results": [], + "similarity_mode": similarity_mode, + "candidate_truncated": candidate_truncated, + } + + # 5. Apply structured filters, intersected with vec hits. + clauses = [Event.id.in_(list(vec_ids))] + if after is not None: + clauses.append(Event.start_time >= after) + if before is not None: + clauses.append(Event.start_time <= before) + if cameras: + clauses.append(Event.camera.in_(cameras)) + if labels: + clauses.append(Event.label.in_(labels)) + if sub_labels: + clauses.append(Event.sub_label.in_(sub_labels)) + if zones: + # Mirror the pattern used by frigate/api/event.py for JSON-array zone match. + zone_clauses = [Event.zones.cast("text") % f'*"{zone}"*' for zone in zones] + clauses.append(reduce(operator.or_, zone_clauses)) + + eligible = {e.id: e for e in Event.select().where(reduce(operator.and_, clauses))} + + # 6. Fuse and rank. + scored: List[tuple[str, float]] = [] + for eid in eligible: + v_score = ( + distance_to_score(visual_distances[eid], context.thumb_stats) + if eid in visual_distances + else None + ) + d_score = ( + distance_to_score(description_distances[eid], context.desc_stats) + if eid in description_distances + else None + ) + fused = fuse_scores(v_score, d_score) + if fused is None: + continue + if min_score is not None and fused < min_score: + continue + scored.append((eid, fused)) + + scored.sort(key=lambda pair: pair[1], reverse=True) + scored = scored[:limit] + + results = [hydrate_event(eligible[eid], score=score) for eid, score in scored] + + return { + "anchor": hydrate_event(anchor), + "results": results, + "similarity_mode": similarity_mode, + "candidate_truncated": candidate_truncated, + } + + @router.post( "/chat/execute", dependencies=[Depends(allow_any_authenticated())], @@ -459,6 +657,13 @@ async def execute_tool( if tool_name == "search_objects": return await _execute_search_objects(arguments, allowed_cameras) + if tool_name == "find_similar_objects": + result = await _execute_find_similar_objects( + request, arguments, allowed_cameras + ) + status_code = 200 if "error" not in result else 400 + return JSONResponse(content=result, status_code=status_code) + if tool_name == "set_camera_state": result = await _execute_set_camera_state(request, arguments) return JSONResponse( @@ -642,6 +847,8 @@ async def _execute_tool_internal( except (json.JSONDecodeError, AttributeError) as e: logger.warning(f"Failed to extract tool result: {e}") return {"error": "Failed to parse tool result"} + elif tool_name == "find_similar_objects": + return await _execute_find_similar_objects(request, arguments, allowed_cameras) elif tool_name == "set_camera_state": return await _execute_set_camera_state(request, arguments) elif tool_name == "get_live_context": @@ -664,8 +871,9 @@ async def _execute_tool_internal( return _execute_get_recap(arguments, allowed_cameras) else: logger.error( - "Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context, " - "start_camera_watch, stop_camera_watch, get_profile_status, get_recap. Arguments received: %s", + "Tool call failed: unknown tool %r. Expected one of: search_objects, find_similar_objects, " + "get_live_context, start_camera_watch, stop_camera_watch, get_profile_status, get_recap. " + "Arguments received: %s", tool_name, json.dumps(arguments), ) @@ -927,7 +1135,7 @@ async def _execute_pending_tools( json.dumps(tool_args), ) if tool_name == "search_objects" and isinstance(tool_result, list): - tool_result = _format_events_with_local_time(tool_result) + tool_result = format_events_with_local_time(tool_result) _keys = { "id", "camera", @@ -1080,7 +1288,9 @@ Do not start your response with phrases like "I will check...", "Let me see...", Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields. When users ask about "today", "yesterday", "this week", etc., use the current date above as reference. When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today). -Always be accurate with time calculations based on the current date provided.{cameras_section}""" +Always be accurate with time calculations based on the current date provided. + +When a user refers to a specific object they have seen or describe with identifying details ("that green car", "the person in the red jacket", "a package left today"), prefer the find_similar_objects tool over search_objects. Use search_objects first only to locate the anchor event, then pass its id to find_similar_objects. For generic queries like "show me all cars today", keep using search_objects. If a user message begins with [attached_event:], treat that event id as the anchor for any similarity or "tell me more" request in the same message and call find_similar_objects with that id.{cameras_section}""" conversation.append( { @@ -1118,6 +1328,9 @@ Always be accurate with time calculations based on the current date provided.{ca async def stream_body_llm(): nonlocal conversation, stream_tool_calls, stream_iterations while stream_iterations < max_iterations: + if await request.is_disconnected(): + logger.debug("Client disconnected, stopping chat stream") + return logger.debug( f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) " f"with {len(conversation)} message(s)" @@ -1127,6 +1340,9 @@ Always be accurate with time calculations based on the current date provided.{ca tools=tools if tools else None, tool_choice="auto", ): + if await request.is_disconnected(): + logger.debug("Client disconnected, stopping chat stream") + return kind, value = event if kind == "content_delta": yield ( @@ -1156,6 +1372,11 @@ Always be accurate with time calculations based on the current date provided.{ca msg.get("content"), pending ) ) + if await request.is_disconnected(): + logger.debug( + "Client disconnected before tool execution" + ) + return ( executed_calls, tool_results, @@ -1240,7 +1461,7 @@ Always be accurate with time calculations based on the current date provided.{ca + b"\n" ) # Stream content in word-sized chunks for smooth UX - for part in _chunk_content(final_content): + for part in chunk_content(final_content): yield ( json.dumps({"type": "content", "delta": part}).encode( "utf-8" diff --git a/frigate/api/chat_util.py b/frigate/api/chat_util.py new file mode 100644 index 000000000..743c38e57 --- /dev/null +++ b/frigate/api/chat_util.py @@ -0,0 +1,135 @@ +"""Pure, stateless helpers used by the chat tool dispatchers. + +These were extracted from frigate/api/chat.py to keep that module focused on +route handlers, tool dispatchers, and streaming loop internals. Nothing in +this file touches the FastAPI request, the embeddings context, or the chat +loop state — all inputs and outputs are plain data. +""" + +import logging +import math +import time +from datetime import datetime +from typing import Any, Dict, Generator, List, Optional + +from frigate.embeddings.util import ZScoreNormalization +from frigate.models import Event + +logger = logging.getLogger(__name__) + + +# Similarity fusion weights for find_similar_objects. +# Visual dominates because the feature's primary use case is "same specific object." +# If these change, update the test in test_chat_find_similar_objects.py. +VISUAL_WEIGHT = 0.65 +DESCRIPTION_WEIGHT = 0.35 + + +def chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]: + """Yield content in word-aware chunks for streaming.""" + if not content: + return + words = content.split(" ") + current: List[str] = [] + current_len = 0 + for w in words: + current.append(w) + current_len += len(w) + 1 + if current_len >= chunk_size: + yield " ".join(current) + " " + current = [] + current_len = 0 + if current: + yield " ".join(current) + + +def format_events_with_local_time( + events_list: List[Dict[str, Any]], +) -> List[Dict[str, Any]]: + """Add human-readable local start/end times to each event for the LLM.""" + result = [] + for evt in events_list: + if not isinstance(evt, dict): + result.append(evt) + continue + copy_evt = dict(evt) + try: + start_ts = evt.get("start_time") + end_ts = evt.get("end_time") + if start_ts is not None: + dt_start = datetime.fromtimestamp(start_ts) + copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p") + if end_ts is not None: + dt_end = datetime.fromtimestamp(end_ts) + copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p") + except (TypeError, ValueError, OSError): + pass + result.append(copy_evt) + return result + + +def distance_to_score(distance: float, stats: ZScoreNormalization) -> float: + """Convert a cosine distance to a [0, 1] similarity score. + + Uses the existing ZScoreNormalization stats maintained by EmbeddingsContext + to normalize across deployments, then a bounded sigmoid. Lower distance -> + higher score. If stats are uninitialized (stddev == 0), returns a neutral + 0.5 so the fallback ordering by raw distance still dominates. + """ + if stats.stddev == 0: + return 0.5 + z = (distance - stats.mean) / stats.stddev + # Sigmoid on -z so that small distance (good) -> high score. + return 1.0 / (1.0 + math.exp(z)) + + +def fuse_scores( + visual_score: Optional[float], + description_score: Optional[float], +) -> Optional[float]: + """Weighted fusion of visual and description similarity scores. + + If one side is missing (e.g., no description embedding for this event), + the other side's score is returned alone with no penalty. If both are + missing, returns None and the caller should drop the event. + """ + if visual_score is None and description_score is None: + return None + if visual_score is None: + return description_score + if description_score is None: + return visual_score + return VISUAL_WEIGHT * visual_score + DESCRIPTION_WEIGHT * description_score + + +def parse_iso_to_timestamp(value: Optional[str]) -> Optional[float]: + """Parse an ISO-8601 string as server-local time -> unix timestamp. + + Mirrors the parsing _execute_search_objects uses so both tools accept the + same format from the LLM. + """ + if value is None: + return None + try: + s = value.replace("Z", "").strip()[:19] + dt = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") + return time.mktime(dt.timetuple()) + except (ValueError, AttributeError, TypeError): + logger.warning("Invalid timestamp format: %s", value) + return None + + +def hydrate_event(event: Event, score: Optional[float] = None) -> Dict[str, Any]: + """Convert an Event row into the dict shape returned by find_similar_objects.""" + data: Dict[str, Any] = { + "id": event.id, + "camera": event.camera, + "label": event.label, + "sub_label": event.sub_label, + "start_time": event.start_time, + "end_time": event.end_time, + "zones": event.zones, + } + if score is not None: + data["score"] = score + return data diff --git a/frigate/api/defs/request/batch_export_body.py b/frigate/api/defs/request/batch_export_body.py new file mode 100644 index 000000000..c0863c885 --- /dev/null +++ b/frigate/api/defs/request/batch_export_body.py @@ -0,0 +1,65 @@ +from typing import List, Optional + +from pydantic import BaseModel, Field, model_validator + +MAX_BATCH_EXPORT_ITEMS = 50 + + +class BatchExportItem(BaseModel): + camera: str = Field(title="Camera name") + start_time: float = Field(title="Start time") + end_time: float = Field(title="End time") + image_path: Optional[str] = Field( + default=None, + title="Existing thumbnail path", + description="Optional existing image to use as the export thumbnail", + ) + friendly_name: Optional[str] = Field( + default=None, + title="Friendly name", + max_length=256, + description="Optional friendly name for this specific export item", + ) + client_item_id: Optional[str] = Field( + default=None, + title="Client item ID", + max_length=128, + description="Optional opaque client identifier echoed back in results", + ) + + +class BatchExportBody(BaseModel): + items: List[BatchExportItem] = Field( + title="Items", + min_length=1, + max_length=MAX_BATCH_EXPORT_ITEMS, + description="List of export items. Each item has its own camera and time range.", + ) + export_case_id: Optional[str] = Field( + default=None, + title="Export case ID", + max_length=30, + description=( + "Existing export case ID to assign all exports to. Attaching to an " + "existing case is temporarily admin-only until case-level ACLs exist." + ), + ) + new_case_name: Optional[str] = Field( + default=None, + title="New case name", + max_length=100, + description="Name of a new export case to create when export_case_id is omitted", + ) + new_case_description: Optional[str] = Field( + default=None, + title="New case description", + description="Optional description for a newly created export case", + ) + + @model_validator(mode="after") + def validate_case_target(self) -> "BatchExportBody": + for item in self.items: + if item.end_time <= item.start_time: + raise ValueError("end_time must be after start_time") + + return self diff --git a/frigate/api/defs/request/export_bulk_body.py b/frigate/api/defs/request/export_bulk_body.py new file mode 100644 index 000000000..004c67d90 --- /dev/null +++ b/frigate/api/defs/request/export_bulk_body.py @@ -0,0 +1,24 @@ +"""Request bodies for bulk export operations.""" + +from typing import Optional + +from pydantic import BaseModel, Field, conlist, constr + + +class ExportBulkDeleteBody(BaseModel): + """Request body for bulk deleting exports.""" + + # List of export IDs with at least one element and each element with at least one char + ids: conlist(constr(min_length=1), min_length=1) + + +class ExportBulkReassignBody(BaseModel): + """Request body for bulk reassigning exports to a case.""" + + # List of export IDs with at least one element and each element with at least one char + ids: conlist(constr(min_length=1), min_length=1) + export_case_id: Optional[str] = Field( + default=None, + max_length=30, + description="Case ID to assign to, or null to unassign from current case", + ) diff --git a/frigate/api/defs/request/export_case_body.py b/frigate/api/defs/request/export_case_body.py index 35cd8ff7f..66cba58ea 100644 --- a/frigate/api/defs/request/export_case_body.py +++ b/frigate/api/defs/request/export_case_body.py @@ -23,13 +23,3 @@ class ExportCaseUpdateBody(BaseModel): description: Optional[str] = Field( default=None, description="Updated description of the export case" ) - - -class ExportCaseAssignBody(BaseModel): - """Request body for assigning or unassigning an export to a case.""" - - export_case_id: Optional[str] = Field( - default=None, - max_length=30, - description="Case ID to assign to the export, or null to unassign", - ) diff --git a/frigate/api/defs/response/export_response.py b/frigate/api/defs/response/export_response.py index 600794f97..b796ba9ac 100644 --- a/frigate/api/defs/response/export_response.py +++ b/frigate/api/defs/response/export_response.py @@ -1,4 +1,4 @@ -from typing import List, Optional +from typing import Any, List, Optional from pydantic import BaseModel, Field @@ -28,6 +28,88 @@ class StartExportResponse(BaseModel): export_id: Optional[str] = Field( default=None, description="The export ID if successfully started" ) + status: Optional[str] = Field( + default=None, + description="Queue status for the export job", + ) + + +class BatchExportResultModel(BaseModel): + """Per-item result for a batch export request.""" + + camera: str = Field(description="Camera name for this export attempt") + export_id: Optional[str] = Field( + default=None, + description="The export ID when the export was successfully queued", + ) + success: bool = Field(description="Whether the export was successfully queued") + status: Optional[str] = Field( + default=None, + description="Queue status for this camera export", + ) + error: Optional[str] = Field( + default=None, + description="Validation or queueing error for this item, if any", + ) + item_index: Optional[int] = Field( + default=None, + description="Zero-based index of this result within the request items list", + ) + client_item_id: Optional[str] = Field( + default=None, + description="Opaque client-supplied item identifier echoed from the request", + ) + + +class BatchExportResponse(BaseModel): + """Response model for starting an export batch.""" + + export_case_id: Optional[str] = Field( + default=None, + description="Export case ID associated with the batch", + ) + export_ids: List[str] = Field(description="Export IDs successfully queued") + results: List[BatchExportResultModel] = Field( + description="Per-item batch export results" + ) + + +class ExportJobModel(BaseModel): + """Model representing a queued or running export job.""" + + id: str = Field(description="Unique identifier for the export job") + job_type: str = Field(description="Job type") + status: str = Field(description="Current job status") + camera: str = Field(description="Camera associated with this export job") + name: Optional[str] = Field( + default=None, + description="Friendly name for the export", + ) + export_case_id: Optional[str] = Field( + default=None, + description="ID of the export case this export belongs to", + ) + request_start_time: float = Field(description="Requested export start time") + request_end_time: float = Field(description="Requested export end time") + start_time: Optional[float] = Field( + default=None, + description="Unix timestamp when execution started", + ) + end_time: Optional[float] = Field( + default=None, + description="Unix timestamp when execution completed", + ) + error_message: Optional[str] = Field( + default=None, + description="Error message for failed jobs", + ) + results: Optional[dict[str, Any]] = Field( + default=None, + description="Result metadata for completed jobs", + ) + + +ExportJobsResponse = List[ExportJobModel] ExportsResponse = List[ExportModel] diff --git a/frigate/api/export.py b/frigate/api/export.py index 056a0613f..714420903 100644 --- a/frigate/api/export.py +++ b/frigate/api/export.py @@ -1,8 +1,10 @@ """Export apis.""" +import datetime import logging import random import string +import time from pathlib import Path from typing import List, Optional @@ -16,11 +18,19 @@ from playhouse.shortcuts import model_to_dict from frigate.api.auth import ( allow_any_authenticated, get_allowed_cameras_for_filter, + get_current_user, require_camera_access, require_role, ) +from frigate.api.defs.request.batch_export_body import ( + BatchExportBody, + BatchExportItem, +) +from frigate.api.defs.request.export_bulk_body import ( + ExportBulkDeleteBody, + ExportBulkReassignBody, +) from frigate.api.defs.request.export_case_body import ( - ExportCaseAssignBody, ExportCaseCreateBody, ExportCaseUpdateBody, ) @@ -34,6 +44,9 @@ from frigate.api.defs.response.export_case_response import ( ExportCasesResponse, ) from frigate.api.defs.response.export_response import ( + BatchExportResponse, + ExportJobModel, + ExportJobsResponse, ExportModel, ExportsResponse, StartExportResponse, @@ -41,11 +54,19 @@ from frigate.api.defs.response.export_response import ( from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.tags import Tags from frigate.const import CLIPS_DIR, EXPORT_DIR +from frigate.jobs.export import ( + ExportJob, + ExportQueueFullError, + available_export_queue_slots, + cancel_queued_export_jobs_for_case, + get_export_job, + list_active_export_jobs, + start_export_job, +) from frigate.models import Export, ExportCase, Previews, Recordings from frigate.record.export import ( DEFAULT_TIME_LAPSE_FFMPEG_ARGS, PlaybackSourceEnum, - RecordingExporter, validate_ffmpeg_args, ) from frigate.util.time import is_current_hour @@ -55,6 +76,209 @@ logger = logging.getLogger(__name__) router = APIRouter(tags=[Tags.export]) +def _generate_id(length: int = 12) -> str: + return "".join(random.choices(string.ascii_lowercase + string.digits, k=length)) + + +def _generate_export_id(camera_name: str) -> str: + return f"{camera_name}_{_generate_id(6)}" + + +def _create_export_case_record( + name: str, + description: Optional[str], +) -> ExportCase: + now = datetime.datetime.fromtimestamp(time.time()) + return ExportCase.create( + id=_generate_id(), + name=name, + description=description, + created_at=now, + updated_at=now, + ) + + +def _validate_camera_name(request: Request, camera_name: str) -> Optional[JSONResponse]: + if camera_name and request.app.frigate_config.cameras.get(camera_name): + return None + + return JSONResponse( + content={"success": False, "message": f"{camera_name} is not a valid camera."}, + status_code=404, + ) + + +def _validate_export_case(export_case_id: Optional[str]) -> Optional[JSONResponse]: + if export_case_id is None: + return None + + try: + ExportCase.get(ExportCase.id == export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found"}, + status_code=404, + ) + + return None + + +def _sanitize_existing_image( + image_path: Optional[str], +) -> tuple[Optional[str], Optional[JSONResponse]]: + existing_image = sanitize_filepath(image_path) if image_path else None + + if existing_image and not existing_image.startswith(CLIPS_DIR): + return None, JSONResponse( + content={"success": False, "message": "Invalid image path"}, + status_code=400, + ) + + return existing_image, None + + +def _validate_export_source( + camera_name: str, + start_time: float, + end_time: float, + playback_source: PlaybackSourceEnum, +) -> Optional[str]: + if playback_source == PlaybackSourceEnum.recordings: + recordings_count = ( + Recordings.select() + .where( + Recordings.start_time.between(start_time, end_time) + | Recordings.end_time.between(start_time, end_time) + | ( + (start_time > Recordings.start_time) + & (end_time < Recordings.end_time) + ) + ) + .where(Recordings.camera == camera_name) + .count() + ) + + if recordings_count <= 0: + return "No recordings found for time range" + + return None + + previews_count = ( + Previews.select() + .where( + Previews.start_time.between(start_time, end_time) + | Previews.end_time.between(start_time, end_time) + | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) + ) + .where(Previews.camera == camera_name) + .count() + ) + + if not is_current_hour(start_time) and previews_count <= 0: + return "No previews found for time range" + + return None + + +def _get_item_recording_export_errors( + request: Request, + items: list[BatchExportItem], +) -> dict[int, str]: + """Return {item_index: error message} for items with invalid state. + + Checks camera configuration and recording presence per item. Groups by + camera and issues one query per unique camera covering that camera's + full requested range, then checks each item's range against the returned + rows in Python. This avoids O(N) DB round-trips on large batches. + """ + configured_cameras = request.app.frigate_config.cameras + errors: dict[int, str] = {} + + # Validate camera configuration first + item_ranges_by_camera: dict[str, list[tuple[int, float, float]]] = {} + for index, item in enumerate(items): + if not configured_cameras.get(item.camera): + errors[index] = f"{item.camera} is not a valid camera." + continue + item_ranges_by_camera.setdefault(item.camera, []).append( + (index, item.start_time, item.end_time) + ) + + if not item_ranges_by_camera: + return errors + + # For each camera, fetch recordings that cover the union of ranges + for camera_name, indexed_ranges in item_ranges_by_camera.items(): + min_start = min(r[1] for r in indexed_ranges) + max_end = max(r[2] for r in indexed_ranges) + + recording_ranges = list( + Recordings.select(Recordings.start_time, Recordings.end_time) + .where( + Recordings.camera == camera_name, + Recordings.start_time.between(min_start, max_end) + | Recordings.end_time.between(min_start, max_end) + | ( + (min_start > Recordings.start_time) + & (max_end < Recordings.end_time) + ), + ) + .iterator() + ) + + for index, start_time, end_time in indexed_ranges: + has_recording = any( + ( + start_time <= rec.start_time <= end_time + or start_time <= rec.end_time <= end_time + or (start_time > rec.start_time and end_time < rec.end_time) + ) + for rec in recording_ranges + ) + if not has_recording: + errors[index] = "No recordings found for time range" + + return errors + + +def _build_export_job( + camera_name: str, + start_time: float, + end_time: float, + friendly_name: Optional[str], + existing_image: Optional[str], + playback_source: PlaybackSourceEnum, + export_case_id: Optional[str], + ffmpeg_input_args: Optional[str] = None, + ffmpeg_output_args: Optional[str] = None, + cpu_fallback: bool = False, +) -> ExportJob: + return ExportJob( + id=_generate_export_id(camera_name), + camera=camera_name, + name=friendly_name, + image_path=existing_image, + export_case_id=export_case_id, + request_start_time=int(start_time), + request_end_time=int(end_time), + playback_source=playback_source.value, + ffmpeg_input_args=ffmpeg_input_args, + ffmpeg_output_args=ffmpeg_output_args, + cpu_fallback=cpu_fallback, + ) + + +def _export_case_to_dict(case: ExportCase) -> dict[str, object]: + case_dict = model_to_dict(case) + + for field in ("created_at", "updated_at"): + value = case_dict.get(field) + if isinstance(value, datetime.datetime): + case_dict[field] = value.timestamp() + + return case_dict + + @router.get( "/exports", response_model=ExportsResponse, @@ -103,10 +327,8 @@ def get_exports( description="Gets all export cases from the database.", ) def get_export_cases(): - cases = ( - ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator() - ) - return JSONResponse(content=[c for c in cases]) + cases = ExportCase.select().order_by(ExportCase.created_at.desc()).iterator() + return JSONResponse(content=[_export_case_to_dict(case) for case in cases]) @router.post( @@ -117,14 +339,8 @@ def get_export_cases(): description="Creates a new export case.", ) def create_export_case(body: ExportCaseCreateBody): - case = ExportCase.create( - id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)), - name=body.name, - description=body.description, - created_at=Path().stat().st_mtime, - updated_at=Path().stat().st_mtime, - ) - return JSONResponse(content=model_to_dict(case)) + case = _create_export_case_record(body.name, body.description) + return JSONResponse(content=_export_case_to_dict(case)) @router.get( @@ -137,7 +353,7 @@ def create_export_case(body: ExportCaseCreateBody): def get_export_case(case_id: str): try: case = ExportCase.get(ExportCase.id == case_id) - return JSONResponse(content=model_to_dict(case)) + return JSONResponse(content=_export_case_to_dict(case)) except DoesNotExist: return JSONResponse( content={"success": False, "message": "Export case not found"}, @@ -166,6 +382,8 @@ def update_export_case(case_id: str, body: ExportCaseUpdateBody): if body.description is not None: case.description = body.description + case.updated_at = datetime.datetime.fromtimestamp(time.time()) + case.save() return JSONResponse( @@ -180,7 +398,7 @@ def update_export_case(case_id: str, body: ExportCaseUpdateBody): summary="Delete export case", description="""Deletes an export case.\n Exports that reference this case will have their export_case set to null.\n """, ) -def delete_export_case(case_id: str): +def delete_export_case(case_id: str, request: Request, delete_exports: bool = False): try: case = ExportCase.get(ExportCase.id == case_id) except DoesNotExist: @@ -189,8 +407,18 @@ def delete_export_case(case_id: str): status_code=404, ) - # Unassign exports from this case but keep the exports themselves - Export.update(export_case=None).where(Export.export_case == case).execute() + if delete_exports: + cancel_queued_export_jobs_for_case(request.app.frigate_config, case_id) + + exports = list(Export.select().where(Export.export_case == case_id)) + for export in exports: + Path(export.video_path).unlink(missing_ok=True) + if export.thumb_path: + Path(export.thumb_path).unlink(missing_ok=True) + export.delete_instance() + else: + # Unassign exports from this case but keep the exports themselves + Export.update(export_case=None).where(Export.export_case == case_id).execute() case.delete_instance() @@ -199,45 +427,214 @@ def delete_export_case(case_id: str): ) -@router.patch( - "/export/{export_id}/case", - response_model=GenericResponse, - dependencies=[Depends(require_role(["admin"]))], - summary="Assign export to case", - description=( - "Assigns an export to a case, or unassigns it if export_case_id is null." - ), +@router.get( + "/jobs/export", + response_model=ExportJobsResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Get active export jobs", + description="Gets queued and running export jobs.", ) -async def assign_export_case( - export_id: str, - body: ExportCaseAssignBody, +def get_active_export_jobs( request: Request, + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), ): - try: - export: Export = Export.get(Export.id == export_id) - await require_camera_access(export.camera, request=request) - except DoesNotExist: + jobs = list_active_export_jobs(request.app.frigate_config) + return JSONResponse( + content=[job.to_dict() for job in jobs if job.camera in allowed_cameras] + ) + + +@router.get( + "/jobs/export/{export_id}", + response_model=ExportJobModel, + dependencies=[Depends(allow_any_authenticated())], + summary="Get export job status", + description="Gets queued, running, or completed status for a specific export job.", +) +async def get_export_job_status(export_id: str, request: Request): + job = get_export_job(request.app.frigate_config, export_id) + if job is None: return JSONResponse( - content={"success": False, "message": "Export not found."}, + content={"success": False, "message": "Job not found"}, status_code=404, ) - if body.export_case_id is not None: - try: - ExportCase.get(ExportCase.id == body.export_case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found."}, - status_code=404, - ) - export.export_case = body.export_case_id - else: - export.export_case = None + await require_camera_access(job.camera, request=request) - export.save() + return JSONResponse(content=job.to_dict()) + + +@router.post( + "/exports/batch", + response_model=BatchExportResponse, + dependencies=[Depends(allow_any_authenticated())], + summary="Start recording export batch", + description=( + "Starts recording exports for a batch of items, each with its own camera " + "and time range, and assigns them to a single export case. Attaching to " + "an existing case is temporarily admin-only until case-level ACLs exist." + ), +) +def export_recordings_batch( + request: Request, + body: BatchExportBody, + allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), + current_user: dict = Depends(get_current_user), +): + if isinstance(current_user, JSONResponse): + return current_user + + # Stopgap: attaching to an existing case remains admin-only until + # case-level ACLs exist. Non-admins can still create a fresh case + # as a side effect of queueing items they already have camera access to. + if body.export_case_id is not None and current_user["role"] != "admin": + return JSONResponse( + content={ + "success": False, + "message": "Only admins can attach exports to an existing case.", + }, + status_code=403, + ) + + case_validation_error = _validate_export_case(body.export_case_id) + if case_validation_error is not None: + return case_validation_error + + # Fail-closed camera access: any item referencing an inaccessible + # camera rejects the whole request. The UI's review list is already + # filtered by camera access, so reaching this branch implies a stale + # session or a crafted request — reject loudly rather than silently + # dropping items. + allowed_camera_set = set(allowed_cameras) + for item in body.items: + if item.camera not in allowed_camera_set: + return JSONResponse( + content={ + "success": False, + "message": f"Cannot export from {item.camera}: access denied", + }, + status_code=403, + ) + + # Sanitize each item's image_path up front. A bad path in any item + # kills the whole request, consistent with single-export behavior. + sanitized_images: list[Optional[str]] = [] + for item in body.items: + existing_image, image_validation_error = _sanitize_existing_image( + item.image_path + ) + if image_validation_error is not None: + return image_validation_error + sanitized_images.append(existing_image) + + item_errors = _get_item_recording_export_errors(request, body.items) + + queueable_indexes = [ + index for index in range(len(body.items)) if index not in item_errors + ] + + if not queueable_indexes: + return JSONResponse( + content={ + "success": False, + "message": ( + "No exports could be queued: no recordings found for the " + "requested ranges." + ), + }, + status_code=400, + ) + + # Preflight admission: reject the whole batch if we can't fit every + # queueable item. Prevents partial batches where the tail fails with + # "queue full" after we've already created a case. + if available_export_queue_slots(request.app.frigate_config) < len( + queueable_indexes + ): + return JSONResponse( + content={ + "success": False, + "message": "Export queue is full. Try again once current exports finish.", + }, + status_code=503, + ) + + export_case = None + export_case_id = body.export_case_id + if export_case_id is None and body.new_case_name: + export_case = _create_export_case_record( + body.new_case_name, + body.new_case_description, + ) + export_case_id = export_case.id + + export_ids: list[str] = [] + results: list[dict[str, Optional[str] | bool | int]] = [] + for index, item in enumerate(body.items): + if index in item_errors: + results.append( + { + "camera": item.camera, + "export_id": None, + "success": False, + "status": None, + "error": item_errors[index], + "item_index": index, + "client_item_id": item.client_item_id, + } + ) + continue + + export_job = _build_export_job( + item.camera, + item.start_time, + item.end_time, + item.friendly_name, + sanitized_images[index], + PlaybackSourceEnum.recordings, + export_case_id, + ) + try: + start_export_job(request.app.frigate_config, export_job) + except Exception: + logger.exception("Failed to queue export job %s", export_job.id) + results.append( + { + "camera": item.camera, + "export_id": None, + "success": False, + "status": None, + "error": "Failed to queue export job", + "item_index": index, + "client_item_id": item.client_item_id, + } + ) + continue + + export_ids.append(export_job.id) + results.append( + { + "camera": item.camera, + "export_id": export_job.id, + "success": True, + "status": "queued", + "error": None, + "item_index": index, + "client_item_id": item.client_item_id, + } + ) + + if export_case is not None and not export_ids: + export_case.delete_instance() + export_case_id = None return JSONResponse( - content={"success": True, "message": "Successfully updated export case."} + content={ + "export_case_id": export_case_id, + "export_ids": export_ids, + "results": results, + }, + status_code=202, ) @@ -257,104 +654,82 @@ def export_recording( start_time: float, end_time: float, body: ExportRecordingsBody, + current_user: dict = Depends(get_current_user), ): - if not camera_name or not request.app.frigate_config.cameras.get(camera_name): - return JSONResponse( - content=( - {"success": False, "message": f"{camera_name} is not a valid camera."} - ), - status_code=404, - ) + if isinstance(current_user, JSONResponse): + return current_user + + camera_validation_error = _validate_camera_name(request, camera_name) + if camera_validation_error is not None: + return camera_validation_error playback_source = body.source friendly_name = body.name - existing_image = sanitize_filepath(body.image_path) if body.image_path else None + existing_image, image_validation_error = _sanitize_existing_image(body.image_path) + if image_validation_error is not None: + return image_validation_error export_case_id = body.export_case_id - if export_case_id is not None: - try: - ExportCase.get(ExportCase.id == export_case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) - # Ensure that existing_image is a valid path - if existing_image and not existing_image.startswith(CLIPS_DIR): + # Attaching to an existing case requires admin. Single-export for + # cameras the user can access is otherwise non-admin; we only gate + # the case-attachment side effect. + if export_case_id is not None and current_user["role"] != "admin": return JSONResponse( - content=({"success": False, "message": "Invalid image path"}), + content={ + "success": False, + "message": "Only admins can attach exports to an existing case.", + }, + status_code=403, + ) + + case_validation_error = _validate_export_case(export_case_id) + if case_validation_error is not None: + return case_validation_error + + source_error = _validate_export_source( + camera_name, + start_time, + end_time, + playback_source, + ) + if source_error is not None: + return JSONResponse( + content={"success": False, "message": source_error}, status_code=400, ) - if playback_source == "recordings": - recordings_count = ( - Recordings.select() - .where( - Recordings.start_time.between(start_time, end_time) - | Recordings.end_time.between(start_time, end_time) - | ( - (start_time > Recordings.start_time) - & (end_time < Recordings.end_time) - ) - ) - .where(Recordings.camera == camera_name) - .count() - ) - - if recordings_count <= 0: - return JSONResponse( - content=( - {"success": False, "message": "No recordings found for time range"} - ), - status_code=400, - ) - else: - previews_count = ( - Previews.select() - .where( - Previews.start_time.between(start_time, end_time) - | Previews.end_time.between(start_time, end_time) - | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) - ) - .where(Previews.camera == camera_name) - .count() - ) - - if not is_current_hour(start_time) and previews_count <= 0: - return JSONResponse( - content=( - {"success": False, "message": "No previews found for time range"} - ), - status_code=400, - ) - - export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" - exporter = RecordingExporter( - request.app.frigate_config, - export_id, + export_job = _build_export_job( camera_name, + start_time, + end_time, friendly_name, existing_image, - int(start_time), - int(end_time), - ( - PlaybackSourceEnum[playback_source] - if playback_source in PlaybackSourceEnum.__members__.values() - else PlaybackSourceEnum.recordings - ), + playback_source, export_case_id, ) - exporter.start() + try: + start_export_job(request.app.frigate_config, export_job) + except ExportQueueFullError: + logger.warning("Export queue is full; rejecting %s", export_job.id) + return JSONResponse( + content={ + "success": False, + "message": "Export queue is full. Try again once current exports finish.", + }, + status_code=503, + ) + return JSONResponse( content=( { "success": True, - "message": "Starting export of recording.", - "export_id": export_id, + "message": "Export queued.", + "export_id": export_job.id, + "status": "queued", } ), - status_code=200, + status_code=202, ) @@ -395,65 +770,6 @@ async def export_rename(event_id: str, body: ExportRenameBody, request: Request) ) -@router.delete( - "/export/{event_id}", - response_model=GenericResponse, - dependencies=[Depends(require_role(["admin"]))], - summary="Delete export", -) -async def export_delete(event_id: str, request: Request): - try: - export: Export = Export.get(Export.id == event_id) - await require_camera_access(export.camera, request=request) - except DoesNotExist: - return JSONResponse( - content=( - { - "success": False, - "message": "Export not found.", - } - ), - status_code=404, - ) - - files_in_use = [] - for process in psutil.process_iter(): - try: - if process.name() != "ffmpeg": - continue - file_list = process.open_files() - if file_list: - for nt in file_list: - if nt.path.startswith(EXPORT_DIR): - files_in_use.append(nt.path.split("/")[-1]) - except psutil.Error: - continue - - if export.video_path.split("/")[-1] in files_in_use: - return JSONResponse( - content=( - {"success": False, "message": "Can not delete in progress export."} - ), - status_code=400, - ) - - Path(export.video_path).unlink(missing_ok=True) - - if export.thumb_path: - Path(export.thumb_path).unlink(missing_ok=True) - - export.delete_instance() - return JSONResponse( - content=( - { - "success": True, - "message": "Successfully deleted export.", - } - ), - status_code=200, - ) - - @router.post( "/export/custom/{camera_name}/start/{start_time}/end/{end_time}", response_model=StartExportResponse, @@ -472,82 +788,36 @@ def export_recording_custom( end_time: float, body: ExportRecordingsCustomBody, ): - if not camera_name or not request.app.frigate_config.cameras.get(camera_name): - return JSONResponse( - content=( - {"success": False, "message": f"{camera_name} is not a valid camera."} - ), - status_code=404, - ) + camera_validation_error = _validate_camera_name(request, camera_name) + if camera_validation_error is not None: + return camera_validation_error playback_source = body.source friendly_name = body.name - existing_image = sanitize_filepath(body.image_path) if body.image_path else None + existing_image, image_validation_error = _sanitize_existing_image(body.image_path) + if image_validation_error is not None: + return image_validation_error ffmpeg_input_args = body.ffmpeg_input_args ffmpeg_output_args = body.ffmpeg_output_args cpu_fallback = body.cpu_fallback export_case_id = body.export_case_id - if export_case_id is not None: - try: - ExportCase.get(ExportCase.id == export_case_id) - except DoesNotExist: - return JSONResponse( - content={"success": False, "message": "Export case not found"}, - status_code=404, - ) + case_validation_error = _validate_export_case(export_case_id) + if case_validation_error is not None: + return case_validation_error - # Ensure that existing_image is a valid path - if existing_image and not existing_image.startswith(CLIPS_DIR): + source_error = _validate_export_source( + camera_name, + start_time, + end_time, + playback_source, + ) + if source_error is not None: return JSONResponse( - content=({"success": False, "message": "Invalid image path"}), + content={"success": False, "message": source_error}, status_code=400, ) - if playback_source == "recordings": - recordings_count = ( - Recordings.select() - .where( - Recordings.start_time.between(start_time, end_time) - | Recordings.end_time.between(start_time, end_time) - | ( - (start_time > Recordings.start_time) - & (end_time < Recordings.end_time) - ) - ) - .where(Recordings.camera == camera_name) - .count() - ) - - if recordings_count <= 0: - return JSONResponse( - content=( - {"success": False, "message": "No recordings found for time range"} - ), - status_code=400, - ) - else: - previews_count = ( - Previews.select() - .where( - Previews.start_time.between(start_time, end_time) - | Previews.end_time.between(start_time, end_time) - | ((start_time > Previews.start_time) & (end_time < Previews.end_time)) - ) - .where(Previews.camera == camera_name) - .count() - ) - - if not is_current_hour(start_time) and previews_count <= 0: - return JSONResponse( - content=( - {"success": False, "message": "No previews found for time range"} - ), - status_code=400, - ) - - export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}" - # Validate user-provided ffmpeg args to prevent injection. # Admin users are trusted and skip validation. is_admin = request.headers.get("remote-role", "") == "admin" @@ -577,34 +847,40 @@ def export_recording_custom( if ffmpeg_output_args is None: ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS - exporter = RecordingExporter( - request.app.frigate_config, - export_id, + export_job = _build_export_job( camera_name, + start_time, + end_time, friendly_name, existing_image, - int(start_time), - int(end_time), - ( - PlaybackSourceEnum[playback_source] - if playback_source in PlaybackSourceEnum.__members__.values() - else PlaybackSourceEnum.recordings - ), + playback_source, export_case_id, ffmpeg_input_args, ffmpeg_output_args, cpu_fallback, ) - exporter.start() + try: + start_export_job(request.app.frigate_config, export_job) + except ExportQueueFullError: + logger.warning("Export queue is full; rejecting %s", export_job.id) + return JSONResponse( + content={ + "success": False, + "message": "Export queue is full. Try again once current exports finish.", + }, + status_code=503, + ) + return JSONResponse( content=( { "success": True, - "message": "Starting export of recording.", - "export_id": export_id, + "message": "Export queued.", + "export_id": export_job.id, + "status": "queued", } ), - status_code=200, + status_code=202, ) @@ -626,3 +902,102 @@ async def get_export(export_id: str, request: Request): content={"success": False, "message": "Export not found"}, status_code=404, ) + + +def _get_files_in_use() -> set[str]: + """Get set of export filenames currently in use by ffmpeg.""" + files_in_use: set[str] = set() + for process in psutil.process_iter(): + try: + if process.name() != "ffmpeg": + continue + file_list = process.open_files() + if file_list: + for nt in file_list: + if nt.path.startswith(EXPORT_DIR): + files_in_use.add(nt.path.split("/")[-1]) + except psutil.Error: + continue + return files_in_use + + +@router.post( + "/exports/delete", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Bulk delete exports", + description="Deletes one or more exports by ID. All IDs must exist and none can be in-progress.", +) +def bulk_delete_exports(body: ExportBulkDeleteBody): + exports = list(Export.select().where(Export.id << body.ids)) + + if len(exports) != len(body.ids): + return JSONResponse( + content={"success": False, "message": "One or more exports not found."}, + status_code=404, + ) + + files_in_use = _get_files_in_use() + + for export in exports: + if export.video_path.split("/")[-1] in files_in_use: + return JSONResponse( + content={ + "success": False, + "message": "Can not delete in-progress export.", + }, + status_code=400, + ) + + for export in exports: + Path(export.video_path).unlink(missing_ok=True) + if export.thumb_path: + Path(export.thumb_path).unlink(missing_ok=True) + + Export.delete().where(Export.id << body.ids).execute() + + return JSONResponse( + content={ + "success": True, + "message": f"Successfully deleted {len(exports)} export(s).", + }, + status_code=200, + ) + + +@router.post( + "/exports/reassign", + response_model=GenericResponse, + dependencies=[Depends(require_role(["admin"]))], + summary="Bulk reassign exports to a case", + description="Assigns or unassigns one or more exports to/from a case. All IDs must exist.", +) +def bulk_reassign_exports(body: ExportBulkReassignBody): + exports = list(Export.select().where(Export.id << body.ids)) + + if len(exports) != len(body.ids): + return JSONResponse( + content={"success": False, "message": "One or more exports not found."}, + status_code=404, + ) + + if body.export_case_id is not None: + try: + ExportCase.get(ExportCase.id == body.export_case_id) + except DoesNotExist: + return JSONResponse( + content={"success": False, "message": "Export case not found."}, + status_code=404, + ) + + Export.update(export_case=body.export_case_id).where( + Export.id << body.ids + ).execute() + + return JSONResponse( + content={ + "success": True, + "message": f"Successfully updated {len(exports)} export(s).", + }, + status_code=200, + ) diff --git a/frigate/app.py b/frigate/app.py index 750f1ad23..0ead74268 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -52,6 +52,7 @@ from frigate.embeddings import EmbeddingProcess, EmbeddingsContext from frigate.events.audio import AudioProcessor from frigate.events.cleanup import EventCleanup from frigate.events.maintainer import EventProcessor +from frigate.jobs.export import reap_stale_exports from frigate.jobs.motion_search import stop_all_motion_search_jobs from frigate.log import _stop_logging from frigate.models import ( @@ -611,6 +612,11 @@ class FrigateApp: # Clean up any stale replay camera artifacts (filesystem + DB) cleanup_replay_cameras() + # Reap any Export rows still marked in_progress from a previous + # session (crash, kill, broken migration). Runs synchronously before + # uvicorn binds so no API request can observe a stale row. + reap_stale_exports() + self.init_inter_process_communicator() self.start_detectors() self.init_dispatcher() diff --git a/frigate/config/camera/record.py b/frigate/config/camera/record.py index 7eae7500d..1f7afc6ce 100644 --- a/frigate/config/camera/record.py +++ b/frigate/config/camera/record.py @@ -92,6 +92,12 @@ class RecordExportConfig(FrigateBaseModel): title="Export hwaccel args", description="Hardware acceleration args to use for export/transcode operations.", ) + max_concurrent: int = Field( + default=3, + ge=1, + title="Maximum concurrent exports", + description="Maximum number of export jobs to process at the same time.", + ) class RecordConfig(FrigateBaseModel): diff --git a/frigate/data_processing/real_time/api.py b/frigate/data_processing/real_time/api.py index 31127220f..b9b7ba26e 100644 --- a/frigate/data_processing/real_time/api.py +++ b/frigate/data_processing/real_time/api.py @@ -1,8 +1,12 @@ """Local only processors for handling real time object processing.""" import logging +import threading from abc import ABC, abstractmethod -from typing import Any +from collections import deque +from concurrent.futures import Future +from queue import Empty, Full, Queue +from typing import Any, Callable import numpy as np @@ -74,3 +78,123 @@ class RealTimeProcessorApi(ABC): payload: The updated configuration object. """ pass + + def drain_results(self) -> list[dict[str, Any]]: + """Return pending results that need IPC side-effects. + + Deferred processors accumulate results on a worker thread. + The maintainer calls this each loop iteration to collect them + and perform publishes on the main thread. + + Synchronous processors return an empty list (default). + """ + return [] + + def shutdown(self) -> None: + """Stop any background work and release resources. + + Called when the processor is being removed or the maintainer + is shutting down. Default is a no-op for synchronous processors. + """ + pass + + +class DeferredRealtimeProcessorApi(RealTimeProcessorApi): + """Base class for processors that offload heavy work to a background thread. + + Subclasses implement: + - process_frame(): do cheap gating + crop + copy, then call _enqueue_task() + - _process_task(task): heavy work (inference, consensus) on the worker thread + - handle_request(): optionally use _enqueue_request() for sync request/response + - expire_object(): call _enqueue_task() with a control message + + The worker thread owns all processor state. No locks are needed because + only the worker mutates state. Results that need IPC are placed in + _pending_results via _emit_result(), and the maintainer drains them + each loop iteration. + """ + + def __init__( + self, + config: FrigateConfig, + metrics: DataProcessorMetrics, + max_queue: int = 8, + ) -> None: + super().__init__(config, metrics) + self._task_queue: Queue = Queue(maxsize=max_queue) + self._pending_results: deque[dict[str, Any]] = deque() + self._results_lock = threading.Lock() + self._stop_event = threading.Event() + self._worker = threading.Thread( + target=self._drain_loop, + daemon=True, + name=f"{type(self).__name__}_worker", + ) + self._worker.start() + + def _drain_loop(self) -> None: + """Worker thread main loop — drains the task queue until stopped.""" + while not self._stop_event.is_set(): + try: + task = self._task_queue.get(timeout=0.5) + except Empty: + continue + + if ( + isinstance(task, tuple) + and len(task) == 2 + and isinstance(task[1], Future) + ): + # Request/response: (callable_and_args, future) + (func, args), future = task + try: + result = func(args) + future.set_result(result) + except Exception as e: + future.set_exception(e) + else: + try: + self._process_task(task) + except Exception: + logger.exception("Error processing deferred task") + + def _enqueue_task(self, task: Any) -> bool: + """Enqueue a task for the worker. Returns False if queue is full (dropped).""" + try: + self._task_queue.put_nowait(task) + return True + except Full: + logger.debug("Deferred processor queue full, dropping task") + return False + + def _enqueue_request(self, func: Callable, args: Any, timeout: float = 10.0) -> Any: + """Enqueue a request and block until the worker returns a result.""" + future: Future = Future() + self._task_queue.put(((func, args), future), timeout=timeout) + return future.result(timeout=timeout) + + def _emit_result(self, result: dict[str, Any]) -> None: + """Called by the worker thread to stage a result for the maintainer.""" + with self._results_lock: + self._pending_results.append(result) + + def drain_results(self) -> list[dict[str, Any]]: + """Called by the maintainer on the main thread to collect pending results.""" + with self._results_lock: + results = list(self._pending_results) + self._pending_results.clear() + return results + + def shutdown(self) -> None: + """Signal the worker to stop and wait for it to finish.""" + self._stop_event.set() + self._worker.join(timeout=5.0) + + @abstractmethod + def _process_task(self, task: Any) -> None: + """Process a single task on the worker thread. + + Subclasses implement inference, consensus, training image saves here. + Call _emit_result() to stage results for the maintainer to publish. + """ + pass diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py index 1dcf59052..e3b0e23ed 100644 --- a/frigate/data_processing/real_time/custom_classification.py +++ b/frigate/data_processing/real_time/custom_classification.py @@ -1,7 +1,6 @@ """Real time processor that works with classification tflite models.""" import datetime -import json import logging import os from typing import Any @@ -10,25 +9,18 @@ import cv2 import numpy as np from frigate.comms.embeddings_updater import EmbeddingsRequestEnum -from frigate.comms.event_metadata_updater import ( - EventMetadataPublisher, - EventMetadataTypeEnum, -) +from frigate.comms.event_metadata_updater import EventMetadataPublisher from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig -from frigate.config.classification import ( - CustomClassificationConfig, - ObjectClassificationType, -) +from frigate.config.classification import CustomClassificationConfig from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR from frigate.log import suppress_stderr_during -from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels from frigate.util.image import calculate_region from frigate.util.object import box_overlaps from ..types import DataProcessorMetrics -from .api import RealTimeProcessorApi +from .api import DeferredRealtimeProcessorApi try: from tflite_runtime.interpreter import Interpreter @@ -40,7 +32,7 @@ logger = logging.getLogger(__name__) MAX_OBJECT_CLASSIFICATIONS = 16 -class CustomStateClassificationProcessor(RealTimeProcessorApi): +class CustomStateClassificationProcessor(DeferredRealtimeProcessorApi): def __init__( self, config: FrigateConfig, @@ -48,7 +40,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): requestor: InterProcessRequestor, metrics: DataProcessorMetrics, ): - super().__init__(config, metrics) + super().__init__(config, metrics, max_queue=4) self.model_config = model_config if not self.model_config.name: @@ -259,14 +251,34 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) return - frame = rgb[y1:y2, x1:x2] + cropped_frame = rgb[y1:y2, x1:x2] try: - resized_frame = cv2.resize(frame, (224, 224)) + resized_frame = cv2.resize(cropped_frame, (224, 224)) except Exception: logger.warning("Failed to resize image for state classification") return + # Copy for training image saves on worker thread + crop_bgr = cv2.cvtColor(cropped_frame, cv2.COLOR_RGB2BGR) + + self._enqueue_task(("classify", camera, now, resized_frame, crop_bgr)) + + def _process_task(self, task: Any) -> None: + kind = task[0] + if kind == "classify": + _, camera, timestamp, resized_frame, crop_bgr = task + self._classify_state(camera, timestamp, resized_frame, crop_bgr) + elif kind == "reload": + self.__build_detector() + + def _classify_state( + self, + camera: str, + timestamp: float, + resized_frame: np.ndarray, + crop_bgr: np.ndarray, + ) -> None: if self.interpreter is None: # When interpreter is None, always save (score is 0.0, which is < 1.0) if self._should_save_image(camera, "unknown", 0.0): @@ -277,15 +289,18 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) write_classification_attempt( self.train_dir, - cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + crop_bgr, "none-none", - now, + timestamp, "unknown", 0.0, max_files=save_attempts, ) return + if not self.tensor_input_details or not self.tensor_output_details: + return + input = np.expand_dims(resized_frame, axis=0) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.invoke() @@ -298,7 +313,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) best_id = int(np.argmax(probs)) score = round(probs[best_id], 2) - self.__update_metrics(datetime.datetime.now().timestamp() - now) + self.__update_metrics(datetime.datetime.now().timestamp() - timestamp) detected_state = self.labelmap[best_id] @@ -310,9 +325,9 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) write_classification_attempt( self.train_dir, - cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), + crop_bgr, "none-none", - now, + timestamp, detected_state, score, max_files=save_attempts, @@ -327,9 +342,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): verified_state = self.verify_state_change(camera, detected_state) if verified_state is not None: - self.requestor.send_data( - f"{camera}/classification/{self.model_config.name}", - verified_state, + self._emit_result( + { + "type": "classification", + "processor": "state", + "model_name": self.model_config.name, + "camera": camera, + "state": verified_state, + } ) def handle_request( @@ -337,14 +357,19 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): ) -> dict[str, Any] | None: if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: - self.__build_detector() - logger.info( - f"Successfully loaded updated model for {self.model_config.name}" - ) - return { - "success": True, - "message": f"Loaded {self.model_config.name} model.", - } + + def _do_reload(data: dict[str, Any]) -> dict[str, Any]: + self.__build_detector() + logger.info( + f"Successfully loaded updated model for {self.model_config.name}" + ) + return { + "success": True, + "message": f"Loaded {self.model_config.name} model.", + } + + result: dict[str, Any] = self._enqueue_request(_do_reload, request_data) + return result else: return None else: @@ -354,7 +379,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi): pass -class CustomObjectClassificationProcessor(RealTimeProcessorApi): +class CustomObjectClassificationProcessor(DeferredRealtimeProcessorApi): def __init__( self, config: FrigateConfig, @@ -363,7 +388,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): requestor: InterProcessRequestor, metrics: DataProcessorMetrics, ): - super().__init__(config, metrics) + super().__init__(config, metrics, max_queue=8) self.model_config = model_config if not self.model_config.name: @@ -536,18 +561,41 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420) - crop = rgb[ - y:y2, - x:x2, - ] + crop = rgb[y:y2, x:x2] - if crop.shape != (224, 224): - try: - resized_crop = cv2.resize(crop, (224, 224)) - except Exception: - logger.warning("Failed to resize image for state classification") - return + try: + resized_crop = cv2.resize(crop, (224, 224)) + except Exception: + logger.warning("Failed to resize image for object classification") + return + # Copy crop for training images (will be used on worker thread) + crop_bgr = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR) + + self._enqueue_task( + ("classify", object_id, obj_data["camera"], now, resized_crop, crop_bgr) + ) + + def _process_task(self, task: Any) -> None: + kind = task[0] + if kind == "classify": + _, object_id, camera, timestamp, resized_crop, crop_bgr = task + self._classify_object(object_id, camera, timestamp, resized_crop, crop_bgr) + elif kind == "expire": + _, object_id = task + if object_id in self.classification_history: + self.classification_history.pop(object_id) + elif kind == "reload": + self.__build_detector() + + def _classify_object( + self, + object_id: str, + camera: str, + timestamp: float, + resized_crop: np.ndarray, + crop_bgr: np.ndarray, + ) -> None: if self.interpreter is None: save_attempts = ( self.model_config.save_attempts @@ -556,9 +604,9 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) write_classification_attempt( self.train_dir, - cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), + crop_bgr, object_id, - now, + timestamp, "unknown", 0.0, max_files=save_attempts, @@ -569,7 +617,10 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): if object_id not in self.classification_history: self.classification_history[object_id] = [] - self.classification_history[object_id].append(("unknown", 0.0, now)) + self.classification_history[object_id].append(("unknown", 0.0, timestamp)) + return + + if not self.tensor_input_details or not self.tensor_output_details: return input = np.expand_dims(resized_crop, axis=0) @@ -584,7 +635,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) best_id = int(np.argmax(probs)) score = round(probs[best_id], 2) - self.__update_metrics(datetime.datetime.now().timestamp() - now) + self.__update_metrics(datetime.datetime.now().timestamp() - timestamp) save_attempts = ( self.model_config.save_attempts @@ -593,9 +644,9 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): ) write_classification_attempt( self.train_dir, - cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), + crop_bgr, object_id, - now, + timestamp, self.labelmap[best_id], score, max_files=save_attempts, @@ -610,92 +661,57 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi): sub_label = self.labelmap[best_id] logger.debug( - f"{self.model_config.name}: Object {object_id} (label={obj_data['label']}) passed threshold with sub_label={sub_label}, score={score}" + f"{self.model_config.name}: Object {object_id} passed threshold with sub_label={sub_label}, score={score}" ) consensus_label, consensus_score = self.get_weighted_score( - object_id, sub_label, score, now + object_id, sub_label, score, timestamp ) logger.debug( f"{self.model_config.name}: get_weighted_score returned consensus_label={consensus_label}, consensus_score={consensus_score} for {object_id}" ) - if consensus_label is not None: - camera = obj_data["camera"] - logger.debug( - f"{self.model_config.name}: Publishing sub_label={consensus_label} for {obj_data['label']} object {object_id} on {camera}" + if consensus_label is not None and self.model_config.object_config is not None: + self._emit_result( + { + "type": "classification", + "processor": "object", + "model_name": self.model_config.name, + "classification_type": self.model_config.object_config.classification_type, + "object_id": object_id, + "camera": camera, + "timestamp": timestamp, + "label": consensus_label, + "score": consensus_score, + } ) - if ( - self.model_config.object_config.classification_type - == ObjectClassificationType.sub_label - ): - self.sub_label_publisher.publish( - (object_id, consensus_label, consensus_score), - EventMetadataTypeEnum.sub_label, - ) - self.requestor.send_data( - "tracked_object_update", - json.dumps( - { - "type": TrackedObjectUpdateTypesEnum.classification, - "id": object_id, - "camera": camera, - "timestamp": now, - "model": self.model_config.name, - "sub_label": consensus_label, - "score": consensus_score, - } - ), - ) - elif ( - self.model_config.object_config.classification_type - == ObjectClassificationType.attribute - ): - self.sub_label_publisher.publish( - ( - object_id, - self.model_config.name, - consensus_label, - consensus_score, - ), - EventMetadataTypeEnum.attribute.value, - ) - self.requestor.send_data( - "tracked_object_update", - json.dumps( - { - "type": TrackedObjectUpdateTypesEnum.classification, - "id": object_id, - "camera": camera, - "timestamp": now, - "model": self.model_config.name, - "attribute": consensus_label, - "score": consensus_score, - } - ), - ) - - def handle_request(self, topic: str, request_data: dict) -> dict | None: + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: if topic == EmbeddingsRequestEnum.reload_classification_model.value: if request_data.get("model_name") == self.model_config.name: - self.__build_detector() - logger.info( - f"Successfully loaded updated model for {self.model_config.name}" - ) - return { - "success": True, - "message": f"Loaded {self.model_config.name} model.", - } + + def _do_reload(data: dict[str, Any]) -> dict[str, Any]: + self.__build_detector() + logger.info( + f"Successfully loaded updated model for {self.model_config.name}" + ) + return { + "success": True, + "message": f"Loaded {self.model_config.name} model.", + } + + result: dict[str, Any] = self._enqueue_request(_do_reload, request_data) + return result else: return None else: return None def expire_object(self, object_id: str, camera: str) -> None: - if object_id in self.classification_history: - self.classification_history.pop(object_id) + self._enqueue_task(("expire", object_id)) def write_classification_attempt( diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index 3f066a860..ea1c9a118 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -2,6 +2,7 @@ import base64 import datetime +import json import logging import threading from multiprocessing.synchronize import Event as MpEvent @@ -33,6 +34,7 @@ from frigate.config.camera.updater import ( CameraConfigUpdateEnum, CameraConfigUpdateSubscriber, ) +from frigate.config.classification import ObjectClassificationType from frigate.data_processing.common.license_plate.model import ( LicensePlateModelRunner, ) @@ -61,6 +63,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.genai import GenAIClientManager from frigate.models import Event, Recordings, ReviewSegment, Trigger +from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize from frigate.util.file import get_event_thumbnail_bytes from frigate.util.image import SharedMemoryFrameManager @@ -274,10 +277,15 @@ class EmbeddingMaintainer(threading.Thread): self._process_recordings_updates() self._process_review_updates() self._process_frame_updates() + self._process_deferred_results() self._expire_dedicated_lpr() self._process_finalized() self._process_event_metadata() + # Shutdown deferred processors + for processor in self.realtime_processors: + processor.shutdown() + self.config_updater.stop() self.enrichment_config_subscriber.stop() self.event_subscriber.stop() @@ -316,10 +324,9 @@ class EmbeddingMaintainer(threading.Thread): model_name = topic.split("/")[-1] if model_config is None: - self.realtime_processors = [ - processor - for processor in self.realtime_processors - if not ( + remaining = [] + for processor in self.realtime_processors: + if ( isinstance( processor, ( @@ -328,8 +335,11 @@ class EmbeddingMaintainer(threading.Thread): ), ) and processor.model_config.name == model_name - ) - ] + ): + processor.shutdown() + else: + remaining.append(processor) + self.realtime_processors = remaining logger.info( f"Successfully removed classification processor for model: {model_name}" @@ -697,6 +707,68 @@ class EmbeddingMaintainer(threading.Thread): self.frame_manager.close(frame_name) + def _process_deferred_results(self) -> None: + """Drain results from deferred processors and perform IPC side-effects.""" + for processor in self.realtime_processors: + results = processor.drain_results() + + for result in results: + if result.get("type") != "classification": + continue + + if result["processor"] == "state": + self.requestor.send_data( + f"{result['camera']}/classification/{result['model_name']}", + result["state"], + ) + elif result["processor"] == "object": + object_id = result["object_id"] + camera = result["camera"] + timestamp = result["timestamp"] + model_name = result["model_name"] + label = result["label"] + score = result["score"] + classification_type = result["classification_type"] + + if classification_type == ObjectClassificationType.sub_label: + self.event_metadata_publisher.publish( + (object_id, label, score), + EventMetadataTypeEnum.sub_label, + ) + self.requestor.send_data( + "tracked_object_update", + json.dumps( + { + "type": TrackedObjectUpdateTypesEnum.classification, + "id": object_id, + "camera": camera, + "timestamp": timestamp, + "model": model_name, + "sub_label": label, + "score": score, + } + ), + ) + elif classification_type == ObjectClassificationType.attribute: + self.event_metadata_publisher.publish( + (object_id, model_name, label, score), + EventMetadataTypeEnum.attribute.value, + ) + self.requestor.send_data( + "tracked_object_update", + json.dumps( + { + "type": TrackedObjectUpdateTypesEnum.classification, + "id": object_id, + "camera": camera, + "timestamp": timestamp, + "model": model_name, + "attribute": label, + "score": score, + } + ), + ) + def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None: """Embed the thumbnail for an event.""" if not self.config.semantic_search.enabled: diff --git a/frigate/jobs/export.py b/frigate/jobs/export.py new file mode 100644 index 000000000..4540f7dd8 --- /dev/null +++ b/frigate/jobs/export.py @@ -0,0 +1,387 @@ +"""Export job management with queued background execution.""" + +import logging +import os +import threading +import time +from dataclasses import dataclass +from pathlib import Path +from queue import Full, Queue +from typing import Any, Optional + +from peewee import DoesNotExist + +from frigate.config import FrigateConfig +from frigate.jobs.job import Job +from frigate.models import Export +from frigate.record.export import PlaybackSourceEnum, RecordingExporter +from frigate.types import JobStatusTypesEnum + +logger = logging.getLogger(__name__) + +# Maximum number of jobs that can sit in the queue waiting to run. +# Prevents a runaway client from unbounded memory growth. +MAX_QUEUED_EXPORT_JOBS = 100 + + +class ExportQueueFullError(RuntimeError): + """Raised when the export queue is at capacity.""" + + +@dataclass +class ExportJob(Job): + """Job state for export operations.""" + + job_type: str = "export" + camera: str = "" + name: Optional[str] = None + image_path: Optional[str] = None + export_case_id: Optional[str] = None + request_start_time: float = 0.0 + request_end_time: float = 0.0 + playback_source: str = PlaybackSourceEnum.recordings.value + ffmpeg_input_args: Optional[str] = None + ffmpeg_output_args: Optional[str] = None + cpu_fallback: bool = False + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for API responses. + + Only exposes fields that are part of the public ExportJobModel schema. + Internal execution details (image_path, ffmpeg args, cpu_fallback) are + intentionally omitted so they don't leak through the API. + """ + return { + "id": self.id, + "job_type": self.job_type, + "status": self.status, + "camera": self.camera, + "name": self.name, + "export_case_id": self.export_case_id, + "request_start_time": self.request_start_time, + "request_end_time": self.request_end_time, + "start_time": self.start_time, + "end_time": self.end_time, + "error_message": self.error_message, + "results": self.results, + } + + +class ExportQueueWorker(threading.Thread): + """Worker that executes queued exports.""" + + def __init__(self, manager: "ExportJobManager", worker_index: int) -> None: + super().__init__( + daemon=True, + name=f"export_queue_worker_{worker_index}", + ) + self.manager = manager + + def run(self) -> None: + while True: + job = self.manager.queue.get() + + try: + self.manager.run_job(job) + except Exception: + logger.exception( + "Export queue worker failed while processing %s", job.id + ) + finally: + self.manager.queue.task_done() + + +class ExportJobManager: + """Concurrency-limited manager for queued export jobs.""" + + def __init__( + self, + config: FrigateConfig, + max_concurrent: int, + max_queued: int = MAX_QUEUED_EXPORT_JOBS, + ) -> None: + self.config = config + self.max_concurrent = max(1, max_concurrent) + self.queue: Queue[ExportJob] = Queue(maxsize=max(1, max_queued)) + self.jobs: dict[str, ExportJob] = {} + self.lock = threading.Lock() + self.workers: list[ExportQueueWorker] = [] + self.started = False + + def ensure_started(self) -> None: + """Ensure worker threads are started exactly once.""" + with self.lock: + if self.started: + self._restart_dead_workers_locked() + return + + for index in range(self.max_concurrent): + worker = ExportQueueWorker(self, index) + worker.start() + self.workers.append(worker) + + self.started = True + + def _restart_dead_workers_locked(self) -> None: + for index, worker in enumerate(self.workers): + if worker.is_alive(): + continue + + logger.error( + "Export queue worker %s died unexpectedly, restarting", worker.name + ) + replacement = ExportQueueWorker(self, index) + replacement.start() + self.workers[index] = replacement + + def enqueue(self, job: ExportJob) -> str: + """Queue a job for background execution. + + Raises ExportQueueFullError if the queue is at capacity. + """ + self.ensure_started() + + try: + self.queue.put_nowait(job) + except Full as err: + raise ExportQueueFullError( + "Export queue is full; try again once current exports finish" + ) from err + + with self.lock: + self.jobs[job.id] = job + + return job.id + + def get_job(self, job_id: str) -> Optional[ExportJob]: + """Get a job by ID.""" + with self.lock: + return self.jobs.get(job_id) + + def list_active_jobs(self) -> list[ExportJob]: + """List queued and running jobs.""" + with self.lock: + return [ + job + for job in self.jobs.values() + if job.status in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running) + ] + + def cancel_queued_jobs_for_case(self, case_id: str) -> list[ExportJob]: + """Cancel queued export jobs assigned to a deleted case.""" + cancelled_jobs: list[ExportJob] = [] + + with self.lock: + with self.queue.mutex: + retained_jobs: list[ExportJob] = [] + + while self.queue.queue: + job = self.queue.queue.popleft() + + if ( + job.export_case_id == case_id + and job.status == JobStatusTypesEnum.queued + ): + job.status = JobStatusTypesEnum.cancelled + job.end_time = time.time() + cancelled_jobs.append(job) + continue + + retained_jobs.append(job) + + self.queue.queue.extend(retained_jobs) + + if cancelled_jobs: + self.queue.unfinished_tasks = max( + 0, + self.queue.unfinished_tasks - len(cancelled_jobs), + ) + if self.queue.unfinished_tasks == 0: + self.queue.all_tasks_done.notify_all() + self.queue.not_full.notify_all() + + return cancelled_jobs + + def available_slots(self) -> int: + """Approximate number of additional jobs that could be queued right now. + + Uses Queue.qsize() which is best-effort; callers should treat the + result as advisory since another thread could enqueue between + checking and enqueueing. + """ + return max(0, self.queue.maxsize - self.queue.qsize()) + + def run_job(self, job: ExportJob) -> None: + """Execute a queued export job.""" + job.status = JobStatusTypesEnum.running + job.start_time = time.time() + + exporter = RecordingExporter( + self.config, + job.id, + job.camera, + job.name, + job.image_path, + int(job.request_start_time), + int(job.request_end_time), + PlaybackSourceEnum(job.playback_source), + job.export_case_id, + job.ffmpeg_input_args, + job.ffmpeg_output_args, + job.cpu_fallback, + ) + + try: + exporter.run() + export = Export.get_or_none(Export.id == job.id) + if export is None: + job.status = JobStatusTypesEnum.failed + job.error_message = "Export failed" + elif export.in_progress: + job.status = JobStatusTypesEnum.failed + job.error_message = "Export did not complete" + else: + job.status = JobStatusTypesEnum.success + job.results = { + "export_id": export.id, + "export_case_id": export.export_case_id, + "video_path": export.video_path, + "thumb_path": export.thumb_path, + } + except DoesNotExist: + job.status = JobStatusTypesEnum.failed + job.error_message = "Export not found" + except Exception as err: + logger.exception("Export job %s failed: %s", job.id, err) + job.status = JobStatusTypesEnum.failed + job.error_message = str(err) + finally: + job.end_time = time.time() + + +_job_manager: Optional[ExportJobManager] = None +_job_manager_lock = threading.Lock() + + +def _get_max_concurrent(config: FrigateConfig) -> int: + return int(config.record.export.max_concurrent) + + +def reap_stale_exports() -> None: + """Sweep Export rows stuck with in_progress=True from previous sessions. + + On Frigate startup no export job is alive yet, so any in_progress=True + row must be a leftover from a previous session that crashed, was killed + mid-export, or returned early from RecordingExporter.run() without + flipping the flag. For each stale row we either: + + - delete the row (and any thumb) if the video file is missing or empty, + since there is nothing worth recovering + - flip in_progress to False if the video file exists on disk and is + non-empty, treating it as a completed export the user can manage + through the normal UI + + Must only be called when the export job manager is certain to have no + active jobs — i.e., at Frigate startup, before any worker runs. + + All exceptions are caught and logged; the caller does not need to wrap + this in a try/except. A failure on a single row will not stop the rest + of the sweep, and a failure in the top-level query will log and return. + """ + try: + stale_exports = list(Export.select().where(Export.in_progress == True)) # noqa: E712 + except Exception: + logger.exception("Failed to query stale in-progress exports") + return + + if not stale_exports: + logger.debug("No stale in-progress exports found on startup") + return + + flipped = 0 + deleted = 0 + errored = 0 + + for export in stale_exports: + try: + video_path = export.video_path + has_usable_file = False + + if video_path: + try: + has_usable_file = os.path.getsize(video_path) > 0 + except OSError: + has_usable_file = False + + if has_usable_file: + # Unassign from any case on recovery: the user should + # re-triage a recovered export rather than have it silently + # reappear inside a case they curated. + Export.update( + {Export.in_progress: False, Export.export_case: None} + ).where(Export.id == export.id).execute() + flipped += 1 + logger.info( + "Recovered stale in-progress export %s (file intact on disk)", + export.id, + ) + continue + + if export.thumb_path: + Path(export.thumb_path).unlink(missing_ok=True) + if video_path: + Path(video_path).unlink(missing_ok=True) + Export.delete().where(Export.id == export.id).execute() + deleted += 1 + logger.info( + "Deleted stale in-progress export %s (no usable file on disk)", + export.id, + ) + except Exception: + errored += 1 + logger.exception("Failed to reap stale export %s", export.id) + + logger.info( + "Stale export cleanup complete: %d recovered, %d deleted, %d errored", + flipped, + deleted, + errored, + ) + + +def get_export_job_manager(config: FrigateConfig) -> ExportJobManager: + """Get or create the singleton export job manager.""" + global _job_manager + + with _job_manager_lock: + if _job_manager is None: + _job_manager = ExportJobManager(config, _get_max_concurrent(config)) + _job_manager.ensure_started() + return _job_manager + + +def start_export_job(config: FrigateConfig, job: ExportJob) -> str: + """Queue an export job and return its ID.""" + return get_export_job_manager(config).enqueue(job) + + +def get_export_job(config: FrigateConfig, job_id: str) -> Optional[ExportJob]: + """Get a queued or completed export job by ID.""" + return get_export_job_manager(config).get_job(job_id) + + +def list_active_export_jobs(config: FrigateConfig) -> list[ExportJob]: + """List queued and running export jobs.""" + return get_export_job_manager(config).list_active_jobs() + + +def cancel_queued_export_jobs_for_case( + config: FrigateConfig, case_id: str +) -> list[ExportJob]: + """Cancel queued export jobs that still point at a deleted case.""" + return get_export_job_manager(config).cancel_queued_jobs_for_case(case_id) + + +def available_export_queue_slots(config: FrigateConfig) -> int: + """Approximate number of additional export jobs that could be queued now.""" + return get_export_job_manager(config).available_slots() diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index 79b771cb2..e48b3e787 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -152,21 +152,12 @@ class OnvifController: cam = self.camera_configs[cam_name] try: - user = cam.onvif.user - password = cam.onvif.password - - if user is not None and isinstance(user, bytes): - user = user.decode("utf-8") - - if password is not None and isinstance(password, bytes): - password = password.decode("utf-8") - self.cams[cam_name] = { "onvif": ONVIFCamera( cam.onvif.host, cam.onvif.port, - user, - password, + cam.onvif.user, + cam.onvif.password, wsdl_dir=str(Path(find_spec("onvif").origin).parent / "wsdl"), adjust_time=cam.onvif.ignore_time_mismatch, encrypt=not cam.onvif.tls_insecure, @@ -459,15 +450,15 @@ class OnvifController: presets = [] for preset in presets: - # Ensure preset name is a Unicode string and handle UTF-8 characters correctly preset_name = getattr(preset, "Name") or f"preset {preset['token']}" - - if isinstance(preset_name, bytes): - preset_name = preset_name.decode("utf-8") - - # Convert to lowercase while preserving UTF-8 characters - preset_name_lower = preset_name.lower() - self.cams[camera_name]["presets"][preset_name_lower] = preset["token"] + # Some cameras (e.g. Reolink) return UTF-8 bytes that zeep decodes + # as latin-1, producing mojibake. Detect that and repair it by + # round-tripping through latin-1 -> utf-8. + try: + preset_name = preset_name.encode("latin-1").decode("utf-8") + except (UnicodeEncodeError, UnicodeDecodeError): + pass + self.cams[camera_name]["presets"][preset_name.lower()] = preset["token"] # get list of supported features supported_features = [] @@ -695,9 +686,6 @@ class OnvifController: self.cams[camera_name]["active"] = False async def _move_to_preset(self, camera_name: str, preset: str) -> None: - if isinstance(preset, bytes): - preset = preset.decode("utf-8") - preset = preset.lower() if preset not in self.cams[camera_name]["presets"]: diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index e3409652e..73868ea24 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -372,6 +372,7 @@ class RecordingMaintainer(threading.Thread): ) record_config = self.config.cameras[camera].record + segment_stats: SegmentInfo | None = None highest = None if record_config.continuous.days > 0: @@ -401,9 +402,19 @@ class RecordingMaintainer(threading.Thread): if highest == "continuous" else RetainModeEnum.motion ) - return await self.move_segment( - camera, start_time, end_time, duration, cache_path, record_mode - ) + segment_stats = self.segment_stats(camera, start_time, end_time) + + # Here we only check if we should move the segment based on non-object recording retention + # we will always want to check for overlapping review items below before dropping the segment + if not segment_stats.should_discard_segment(record_mode): + return await self.move_segment( + camera, + start_time, + end_time, + duration, + cache_path, + segment_stats, + ) # we fell through the continuous / motion check, so we need to check the review items # if the cached segment overlaps with the review items: @@ -435,15 +446,24 @@ class RecordingMaintainer(threading.Thread): if review.severity == "alert" else record_config.detections.retain.mode ) - # move from cache to recordings immediately - return await self.move_segment( - camera, - start_time, - end_time, - duration, - cache_path, - record_mode, - ) + + if segment_stats is None: + segment_stats = self.segment_stats(camera, start_time, end_time) + + if not segment_stats.should_discard_segment(record_mode): + # move from cache to recordings immediately + return await self.move_segment( + camera, + start_time, + end_time, + duration, + cache_path, + segment_stats, + ) + else: + self.drop_segment(cache_path) + return None + # if it doesn't overlap with an review item, go ahead and drop the segment # if it ends more than the configured pre_capture for the camera # BUT only if continuous/motion is NOT enabled (otherwise wait for processing) @@ -455,6 +475,7 @@ class RecordingMaintainer(threading.Thread): retain_cutoff = datetime.datetime.fromtimestamp( most_recently_processed_frame_time - record_config.event_pre_capture ).astimezone(datetime.timezone.utc) + if end_time < retain_cutoff: self.drop_segment(cache_path) @@ -578,15 +599,8 @@ class RecordingMaintainer(threading.Thread): end_time: datetime.datetime, duration: float, cache_path: str, - store_mode: RetainModeEnum, + segment_info: SegmentInfo, ) -> Optional[dict[str, Any]]: - segment_info = self.segment_stats(camera, start_time, end_time) - - # check if the segment shouldn't be stored - if segment_info.should_discard_segment(store_mode): - self.drop_segment(cache_path) - return None - # directory will be in utc due to start_time being in utc directory = os.path.join( RECORD_DIR, diff --git a/frigate/test/http_api/test_http_export.py b/frigate/test/http_api/test_http_export.py new file mode 100644 index 000000000..e0ceec559 --- /dev/null +++ b/frigate/test/http_api/test_http_export.py @@ -0,0 +1,1433 @@ +import os +import tempfile +from unittest.mock import patch + +from frigate.jobs.export import ( + ExportJob, + get_export_job_manager, + reap_stale_exports, + start_export_job, +) +from frigate.models import Export, ExportCase, Previews, Recordings +from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp + + +class TestHttpExport(BaseTestHttp): + def setUp(self): + super().setUp([Export, ExportCase, Previews, Recordings]) + self.minimal_config["cameras"]["backyard"] = { + "ffmpeg": { + "inputs": [{"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + self.app = super().create_app() + + def tearDown(self): + self.app.dependency_overrides.clear() + super().tearDown() + + def _insert_recording( + self, + recording_id: str, + camera: str, + start_time: float, + end_time: float, + ) -> None: + Recordings.create( + id=recording_id, + camera=camera, + path=f"/tmp/{recording_id}.mp4", + start_time=start_time, + end_time=end_time, + duration=end_time - start_time, + motion=0, + objects=0, + dBFS=0, + segment_size=1, + regions=0, + motion_heatmap=[], + ) + + def test_create_export_case_uses_wall_clock_time(self): + with patch("frigate.api.export.time.time", return_value=1234.5): + with AuthTestClient(self.app) as client: + response = client.post( + "/cases", + json={ + "name": "Investigation", + "description": "A test case", + }, + ) + + assert response.status_code == 200 + response_json = response.json() + assert response_json["created_at"] == 1234.5 + assert response_json["updated_at"] == 1234.5 + + case = ExportCase.get(ExportCase.id == response_json["id"]) + assert case.created_at.timestamp() == 1234.5 + assert case.updated_at.timestamp() == 1234.5 + + def test_update_export_case_refreshes_updated_at(self): + case = ExportCase.create( + id="case123", + name="Old name", + description="Old description", + created_at=10, + updated_at=10, + ) + + with patch("frigate.api.export.time.time", return_value=2222.0): + with AuthTestClient(self.app) as client: + response = client.patch( + f"/cases/{case.id}", + json={"name": "New name", "description": "Updated"}, + ) + + assert response.status_code == 200 + + refreshed = ExportCase.get(ExportCase.id == case.id) + assert refreshed.name == "New name" + assert refreshed.description == "Updated" + assert refreshed.updated_at.timestamp() == 2222.0 + + def test_delete_export_case_delete_exports_cancels_queued_jobs(self): + case = ExportCase.create( + id="case_delete_me", + name="Delete me", + description="", + created_at=10, + updated_at=10, + ) + other_case = ExportCase.create( + id="case_keep_me", + name="Keep me", + description="", + created_at=20, + updated_at=20, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + video_path = os.path.join(tmpdir, "case_export.mp4") + thumb_path = os.path.join(tmpdir, "case_export.webp") + other_video_path = os.path.join(tmpdir, "other_export.mp4") + other_thumb_path = os.path.join(tmpdir, "other_export.webp") + + with open(video_path, "wb") as handle: + handle.write(b"case") + with open(thumb_path, "wb") as handle: + handle.write(b"thumb") + with open(other_video_path, "wb") as handle: + handle.write(b"other") + with open(other_thumb_path, "wb") as handle: + handle.write(b"thumb") + + Export.create( + id="export_in_case", + camera="front_door", + name="Case export", + date=100, + video_path=video_path, + thumb_path=thumb_path, + in_progress=False, + export_case=case, + ) + Export.create( + id="export_other_case", + camera="front_door", + name="Other export", + date=110, + video_path=other_video_path, + thumb_path=other_thumb_path, + in_progress=False, + export_case=other_case, + ) + + with ( + patch("frigate.jobs.export._job_manager", None), + patch( + "frigate.jobs.export.ExportJobManager.ensure_started", + autospec=True, + return_value=None, + ), + ): + start_export_job( + self.app.frigate_config, + ExportJob( + id="queued_case_job", + camera="front_door", + export_case_id=case.id, + request_start_time=100, + request_end_time=120, + ), + ) + start_export_job( + self.app.frigate_config, + ExportJob( + id="queued_other_job", + camera="front_door", + export_case_id=other_case.id, + request_start_time=130, + request_end_time=150, + ), + ) + + manager = get_export_job_manager(self.app.frigate_config) + assert {job.id for job in manager.list_active_jobs()} == { + "queued_case_job", + "queued_other_job", + } + + with AuthTestClient(self.app) as client: + response = client.delete(f"/cases/{case.id}?delete_exports=true") + + assert response.status_code == 200 + assert ExportCase.get_or_none(ExportCase.id == case.id) is None + assert ExportCase.get_or_none(ExportCase.id == other_case.id) is not None + assert Export.get_or_none(Export.id == "export_in_case") is None + assert Export.get_or_none(Export.id == "export_other_case") is not None + assert not os.path.exists(video_path) + assert not os.path.exists(thumb_path) + + cancelled_job = manager.get_job("queued_case_job") + assert cancelled_job is not None + assert cancelled_job.status == "cancelled" + + remaining_job = manager.get_job("queued_other_job") + assert remaining_job is not None + assert remaining_job.status == "queued" + assert [job.id for job in manager.list_active_jobs()] == [ + "queued_other_job" + ] + + def test_batch_export_creates_case_and_reports_partial_success(self): + self._insert_recording("rec-front", "front_door", 100, 200) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + "friendly_name": "Incident - Front Door", + }, + { + "camera": "backyard", + "start_time": 110, + "end_time": 150, + "friendly_name": "Incident - Backyard", + }, + ], + "new_case_name": "Case Alpha", + "new_case_description": "Batch export", + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert len(response_json["export_ids"]) == 1 + assert response_json["results"] == [ + { + "camera": "front_door", + "export_id": response_json["export_ids"][0], + "success": True, + "status": "queued", + "error": None, + "item_index": 0, + "client_item_id": None, + }, + { + "camera": "backyard", + "export_id": None, + "success": False, + "status": None, + "error": "No recordings found for time range", + "item_index": 1, + "client_item_id": None, + }, + ] + start_export_job.assert_called_once() + + case = ExportCase.get(ExportCase.id == response_json["export_case_id"]) + assert case.name == "Case Alpha" + assert case.description == "Batch export" + + def test_single_export_is_queued_immediately(self): + self._insert_recording("rec-front", "front_door", 100, 200) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/export/front_door/start/110/end/150", + json={ + "name": "Queued export", + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert response_json["success"] is True + assert response_json["status"] == "queued" + assert response_json["export_id"].startswith("front_door_") + start_export_job.assert_called_once() + + def test_single_export_returns_503_when_queue_full(self): + self._insert_recording("rec-front", "front_door", 100, 200) + + from frigate.jobs.export import ExportQueueFullError + + with patch( + "frigate.api.export.start_export_job", + side_effect=ExportQueueFullError("Export queue is full"), + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/export/front_door/start/110/end/150", + json={ + "name": "Rejected export", + }, + ) + + assert response.status_code == 503 + response_json = response.json() + assert response_json["success"] is False + assert "queue is full" in response_json["message"].lower() + + def test_batch_export_returns_503_when_queue_cannot_fit_batch(self): + self._insert_recording("rec-front", "front_door", 100, 200) + self._insert_recording("rec-back", "backyard", 100, 200) + + with patch( + "frigate.api.export.available_export_queue_slots", + return_value=1, + ): + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + }, + { + "camera": "backyard", + "start_time": 110, + "end_time": 150, + }, + ], + "new_case_name": "Overflow Case", + }, + ) + + assert response.status_code == 503 + assert response.json()["success"] is False + start_export_job.assert_not_called() + + # Empty case should NOT have been created + assert ExportCase.select().count() == 0 + + def test_get_active_export_jobs_returns_queue_state(self): + queued_job = ExportJob( + id="front_door_queued", + camera="front_door", + status="queued", + request_start_time=100, + request_end_time=150, + ) + + with patch( + "frigate.api.export.list_active_export_jobs", + return_value=[queued_job], + ): + with AuthTestClient(self.app) as client: + response = client.get("/jobs/export") + + assert response.status_code == 200 + assert response.json() == [queued_job.to_dict()] + + def test_reap_stale_exports_deletes_rows_with_no_file(self): + with tempfile.TemporaryDirectory() as tmpdir: + stale_video = os.path.join(tmpdir, "stale.mp4") + stale_thumb = os.path.join(tmpdir, "stale.webp") + # stale_video is intentionally NOT created + with open(stale_thumb, "w") as handle: + handle.write("thumb") + + Export.create( + id="stale_no_file", + camera="front_door", + name="Stuck export", + date=100, + video_path=stale_video, + thumb_path=stale_thumb, + in_progress=True, + ) + + reap_stale_exports() + + assert Export.get_or_none(Export.id == "stale_no_file") is None + assert not os.path.exists(stale_thumb) + + def test_reap_stale_exports_recovers_rows_with_file(self): + with tempfile.TemporaryDirectory() as tmpdir: + intact_video = os.path.join(tmpdir, "intact.mp4") + intact_thumb = os.path.join(tmpdir, "intact.webp") + with open(intact_video, "wb") as handle: + handle.write(b"not actually an mp4 but non-empty") + with open(intact_thumb, "wb") as handle: + handle.write(b"thumb") + + case = ExportCase.create( + id="case_for_stale", + name="Curated case", + description="", + created_at=10, + updated_at=10, + ) + + Export.create( + id="stale_with_file", + camera="front_door", + name="Recoverable export", + date=200, + video_path=intact_video, + thumb_path=intact_thumb, + in_progress=True, + export_case=case, + ) + + reap_stale_exports() + + recovered = Export.get(Export.id == "stale_with_file") + assert recovered.in_progress is False + # Case link must be cleared so the user re-triages the recovered row + assert recovered.export_case is None + # The case itself is untouched + assert ExportCase.get_or_none(ExportCase.id == "case_for_stale") is not None + # Recovered files must NOT be unlinked + assert os.path.exists(intact_video) + assert os.path.exists(intact_thumb) + + def test_reap_stale_exports_delete_path_severs_case_link(self): + with tempfile.TemporaryDirectory() as tmpdir: + missing_video = os.path.join(tmpdir, "missing.mp4") + # file intentionally not created + + case = ExportCase.create( + id="case_losing_member", + name="Case losing a member", + description="", + created_at=20, + updated_at=20, + ) + + Export.create( + id="stale_in_case_no_file", + camera="front_door", + name="Stuck and in a case", + date=250, + video_path=missing_video, + thumb_path="", + in_progress=True, + export_case=case, + ) + + reap_stale_exports() + + # The export row is gone entirely + assert Export.get_or_none(Export.id == "stale_in_case_no_file") is None + # The case stays but has no exports pointing at it + remaining_case = ExportCase.get(ExportCase.id == "case_losing_member") + assert list(remaining_case.exports) == [] + + def test_reap_stale_exports_deletes_rows_with_empty_file(self): + with tempfile.TemporaryDirectory() as tmpdir: + empty_video = os.path.join(tmpdir, "empty.mp4") + # Create a zero-byte file — partial ffmpeg output + open(empty_video, "w").close() + + Export.create( + id="stale_empty_file", + camera="front_door", + name="Zero byte export", + date=300, + video_path=empty_video, + thumb_path="", + in_progress=True, + ) + + reap_stale_exports() + + assert Export.get_or_none(Export.id == "stale_empty_file") is None + assert not os.path.exists(empty_video) + + def test_reap_stale_exports_skips_completed_rows(self): + with tempfile.TemporaryDirectory() as tmpdir: + done_video = os.path.join(tmpdir, "done.mp4") + with open(done_video, "wb") as handle: + handle.write(b"done") + + Export.create( + id="already_done", + camera="front_door", + name="Completed export", + date=400, + video_path=done_video, + thumb_path="", + in_progress=False, + ) + + reap_stale_exports() + + row = Export.get(Export.id == "already_done") + assert row.in_progress is False + assert os.path.exists(done_video) + + def test_batch_export_without_case_goes_to_uncategorized(self): + """Exports without a case target go to uncategorized.""" + self._insert_recording("rec-front", "front_door", 100, 400) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert response_json["export_case_id"] is None + assert ExportCase.select().count() == 0 + + # --- /exports/batch (item-shaped multi-export) --------------------------- + + def test_batch_export_happy_path_creates_case_and_queues_all(self): + self._insert_recording("rec-front", "front_door", 100, 400) + self._insert_recording("rec-back", "backyard", 100, 400) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + }, + { + "camera": "front_door", + "start_time": 200, + "end_time": 240, + }, + { + "camera": "backyard", + "start_time": 300, + "end_time": 340, + }, + ], + "new_case_name": "Incident Apr 11", + "new_case_description": "Review items", + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert len(response_json["export_ids"]) == 3 + assert all(r["success"] for r in response_json["results"]) + assert [r["item_index"] for r in response_json["results"]] == [0, 1, 2] + assert start_export_job.call_count == 3 + + case = ExportCase.get(ExportCase.id == response_json["export_case_id"]) + assert case.name == "Incident Apr 11" + assert case.description == "Review items" + + def test_batch_export_existing_case_does_not_create_new_case(self): + self._insert_recording("rec-front", "front_door", 100, 400) + ExportCase.create( + id="existing_case", + name="Existing", + description="", + created_at=10, + updated_at=10, + ) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "export_case_id": "existing_case", + }, + ) + + assert response.status_code == 202 + assert response.json()["export_case_id"] == "existing_case" + # No additional case was created + assert ExportCase.select().count() == 1 + + def test_batch_export_empty_items_rejected(self): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={"items": [], "new_case_name": "Empty"}, + ) + + assert response.status_code == 422 + + def test_batch_export_over_limit_rejected(self): + items = [ + {"camera": "front_door", "start_time": 100 + i, "end_time": 100 + i + 5} + for i in range(51) + ] + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={"items": items, "new_case_name": "Too many"}, + ) + + assert response.status_code == 422 + + def test_batch_export_end_before_start_rejected(self): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 200, + "end_time": 100, + } + ], + "new_case_name": "Bad range", + }, + ) + + assert response.status_code == 422 + assert ( + response.json()["detail"][0]["msg"] + == "Value error, end_time must be after start_time" + ) + + def test_batch_export_non_admin_without_case_goes_to_uncategorized(self): + """Non-admin batch exports go to uncategorized.""" + self._insert_recording("rec-front", "front_door", 100, 400) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={ + "items": [ + { + "camera": "front_door", + "start_time": 100, + "end_time": 150, + } + ], + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert response_json["export_case_id"] is None + assert ExportCase.select().count() == 0 + + def test_batch_export_camera_access_denied_fails_closed(self): + from fastapi import Request + + from frigate.api.auth import get_allowed_cameras_for_filter + + self._insert_recording("rec-front", "front_door", 100, 400) + + async def restricted(request: Request): + return ["front_door"] + + self.app.dependency_overrides[get_allowed_cameras_for_filter] = restricted + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + }, + { + "camera": "backyard", # not in allowed list + "start_time": 110, + "end_time": 150, + }, + ], + "new_case_name": "Nope", + }, + ) + + assert response.status_code == 403 + start_export_job.assert_not_called() + # No case created + assert ExportCase.select().count() == 0 + + def test_batch_export_case_not_found(self): + self._insert_recording("rec-front", "front_door", 100, 400) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "export_case_id": "does_not_exist", + }, + ) + + assert response.status_code == 404 + + def test_batch_export_per_item_missing_recordings_partial_success(self): + self._insert_recording("rec-front", "front_door", 100, 200) + # backyard has no recordings at all + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + }, + { + "camera": "backyard", + "start_time": 110, + "end_time": 150, + }, + ], + "new_case_name": "Partial", + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert len(response_json["export_ids"]) == 1 + results_by_camera = {r["camera"]: r for r in response_json["results"]} + assert results_by_camera["front_door"]["success"] is True + assert results_by_camera["backyard"]["success"] is False + assert ( + results_by_camera["backyard"]["error"] + == "No recordings found for time range" + ) + start_export_job.assert_called_once() + + # Case is still created because at least one item succeeded + assert ( + ExportCase.get(ExportCase.id == response_json["export_case_id"]) is not None + ) + + def test_batch_export_same_camera_different_ranges_one_missing(self): + # Recording covers 100-200 only. First item fits, second does not. + self._insert_recording("rec-front", "front_door", 100, 200) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + }, + { + "camera": "front_door", + "start_time": 500, + "end_time": 540, + }, + ], + "new_case_name": "Split recordings", + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert len(response_json["export_ids"]) == 1 + results = response_json["results"] + assert results[0]["success"] is True + assert results[0]["item_index"] == 0 + assert results[1]["success"] is False + assert results[1]["item_index"] == 1 + assert results[1]["error"] == "No recordings found for time range" + # Both results carry the same camera — item_index is the only way + # the client can tell them apart. + assert results[0]["camera"] == results[1]["camera"] == "front_door" + start_export_job.assert_called_once() + + def test_batch_export_all_missing_recordings_rolls_back_case(self): + # No recordings inserted at all + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "new_case_name": "Should rollback", + }, + ) + + assert response.status_code == 400 + start_export_job.assert_not_called() + assert ExportCase.select().count() == 0 + + def test_batch_export_preflight_queue_full(self): + self._insert_recording("rec-front", "front_door", 100, 400) + self._insert_recording("rec-back", "backyard", 100, 400) + + with patch( + "frigate.api.export.available_export_queue_slots", + return_value=1, + ): + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + }, + { + "camera": "backyard", + "start_time": 110, + "end_time": 150, + }, + ], + "new_case_name": "Queue full", + }, + ) + + assert response.status_code == 503 + start_export_job.assert_not_called() + assert ExportCase.select().count() == 0 + + def test_batch_export_all_enqueue_calls_fail_rolls_back_case(self): + self._insert_recording("rec-front", "front_door", 100, 400) + + def boom(_config, _job): + raise RuntimeError("simulated enqueue failure") + + with patch( + "frigate.api.export.start_export_job", + side_effect=boom, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "new_case_name": "Will fail", + }, + ) + + assert response.status_code == 202 + response_json = response.json() + assert response_json["export_ids"] == [] + assert response_json["export_case_id"] is None + assert ExportCase.select().count() == 0 + + def test_batch_export_rejects_invalid_image_path(self): + self._insert_recording("rec-front", "front_door", 100, 400) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + "image_path": "/etc/passwd", + } + ], + "new_case_name": "Bad image", + }, + ) + + assert response.status_code == 400 + assert ExportCase.select().count() == 0 + + def test_batch_export_non_admin_can_queue(self): + self._insert_recording("rec-front", "front_door", 100, 400) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "new_case_name": "Viewer export", + }, + ) + + assert response.status_code == 202 + assert len(response.json()["export_ids"]) == 1 + + def test_batch_export_non_admin_cannot_attach_to_existing_case(self): + """Non-admins can create cases via new_case_name but cannot attach + to existing cases they did not create. Closes a write-path hole that + would otherwise be reachable through the unfiltered GET /cases list. + """ + self._insert_recording("rec-front", "front_door", 100, 400) + ExportCase.create( + id="admins_only_case", + name="Admins only", + description="", + created_at=10, + updated_at=10, + ) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "export_case_id": "admins_only_case", + }, + ) + + assert response.status_code == 403 + start_export_job.assert_not_called() + # No exports should have been created in the target case + assert Export.select().count() == 0 + + def test_batch_export_admin_can_attach_to_existing_case(self): + self._insert_recording("rec-front", "front_door", 100, 400) + ExportCase.create( + id="shared_case", + name="Shared", + description="", + created_at=10, + updated_at=10, + ) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + } + ], + "export_case_id": "shared_case", + }, + ) + + assert response.status_code == 202 + assert response.json()["export_case_id"] == "shared_case" + # No additional case created + assert ExportCase.select().count() == 1 + + def test_batch_export_roundtrips_client_item_id(self): + self._insert_recording("rec-front", "front_door", 100, 400) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/batch", + json={ + "items": [ + { + "camera": "front_door", + "start_time": 110, + "end_time": 150, + "client_item_id": "review-123", + } + ], + "new_case_name": "Client id test", + }, + ) + + assert response.status_code == 202 + assert response.json()["results"][0]["client_item_id"] == "review-123" + + def test_single_export_non_admin_cannot_attach_to_existing_case(self): + """The single-export route has the same hole: non-admins should not + be able to smuggle exports into an existing case via export_case_id. + Admin-gating this matches /exports/batch. + """ + self._insert_recording("rec-front", "front_door", 100, 400) + ExportCase.create( + id="admins_only_case", + name="Admins only", + description="", + created_at=10, + updated_at=10, + ) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ) as start_export_job: + with AuthTestClient(self.app) as client: + response = client.post( + "/export/front_door/start/110/end/150", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={"export_case_id": "admins_only_case"}, + ) + + assert response.status_code == 403 + start_export_job.assert_not_called() + assert Export.select().count() == 0 + + def test_single_export_non_admin_can_still_export_without_case(self): + """Regression guard: the admin gate only applies to export_case_id, + not to single exports in general. Non-admins should still be able + to start a single export for a camera they have access to. + """ + self._insert_recording("rec-front", "front_door", 100, 400) + + with patch( + "frigate.api.export.start_export_job", + side_effect=lambda _config, job: job.id, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/export/front_door/start/110/end/150", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={}, + ) + + assert response.status_code == 202 + assert response.json()["success"] is True + + # ── Bulk delete exports ──────────────────────────────────────── + + def test_bulk_delete_exports_success(self): + """All IDs exist, none in-progress → 200, all deleted.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + Export.create( + id="exp2", + camera="front_door", + name="export_2", + date=200, + video_path="/tmp/exp2.mp4", + thumb_path="/tmp/exp2.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/delete", + json={"ids": ["exp1", "exp2"]}, + ) + + assert response.status_code == 200 + assert response.json()["success"] is True + assert Export.select().count() == 0 + + def test_bulk_delete_exports_single_item(self): + """Regression: single-item delete via batch endpoint.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/delete", + json={"ids": ["exp1"]}, + ) + + assert response.status_code == 200 + assert Export.select().count() == 0 + + def test_bulk_delete_exports_some_missing(self): + """Some IDs don't exist → 404, nothing deleted.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/delete", + json={"ids": ["exp1", "nonexistent"]}, + ) + + assert response.status_code == 404 + # Nothing deleted + assert Export.select().count() == 1 + + def test_bulk_delete_exports_all_missing(self): + """All IDs don't exist → 404.""" + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/delete", + json={"ids": ["nope1", "nope2"]}, + ) + + assert response.status_code == 404 + + def test_bulk_delete_exports_in_progress(self): + """Some exports in-progress → 400, nothing deleted.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path=f"{os.environ.get('EXPORT_DIR', '/media/frigate/exports')}/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=True, + ) + + with patch( + "frigate.api.export._get_files_in_use", + return_value={"exp1.mp4"}, + ): + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/delete", + json={"ids": ["exp1"]}, + ) + + assert response.status_code == 400 + assert Export.select().count() == 1 + + def test_bulk_delete_exports_non_admin_rejected(self): + """Non-admin users cannot bulk delete.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/delete", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={"ids": ["exp1"]}, + ) + + assert response.status_code == 403 + assert Export.select().count() == 1 + + # ── Bulk reassign exports ────────────────────────────────────── + + def test_bulk_reassign_exports_to_case(self): + """All IDs exist, case exists → 200, all reassigned.""" + ExportCase.create( + id="case1", + name="Test Case", + description="", + created_at=10, + updated_at=10, + ) + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + Export.create( + id="exp2", + camera="front_door", + name="export_2", + date=200, + video_path="/tmp/exp2.mp4", + thumb_path="/tmp/exp2.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/reassign", + json={"ids": ["exp1", "exp2"], "export_case_id": "case1"}, + ) + + assert response.status_code == 200 + assert response.json()["success"] is True + for exp_id in ["exp1", "exp2"]: + exp = Export.get(Export.id == exp_id) + assert exp.export_case_id == "case1" + + def test_bulk_reassign_exports_to_null(self): + """Reassign to null (uncategorize) → 200.""" + ExportCase.create( + id="case1", + name="Test Case", + description="", + created_at=10, + updated_at=10, + ) + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + export_case="case1", + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/reassign", + json={"ids": ["exp1"], "export_case_id": None}, + ) + + assert response.status_code == 200 + exp = Export.get(Export.id == "exp1") + assert exp.export_case_id is None + + def test_bulk_reassign_exports_single_item(self): + """Regression: single-item reassign via batch endpoint.""" + ExportCase.create( + id="case1", + name="Test Case", + description="", + created_at=10, + updated_at=10, + ) + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/reassign", + json={"ids": ["exp1"], "export_case_id": "case1"}, + ) + + assert response.status_code == 200 + exp = Export.get(Export.id == "exp1") + assert exp.export_case_id == "case1" + + def test_bulk_reassign_exports_some_missing(self): + """Some IDs don't exist → 404, nothing reassigned.""" + ExportCase.create( + id="case1", + name="Test Case", + description="", + created_at=10, + updated_at=10, + ) + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/reassign", + json={ + "ids": ["exp1", "nonexistent"], + "export_case_id": "case1", + }, + ) + + assert response.status_code == 404 + # Nothing reassigned + exp = Export.get(Export.id == "exp1") + assert exp.export_case_id is None + + def test_bulk_reassign_exports_case_not_found(self): + """Target case doesn't exist → 404.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/reassign", + json={"ids": ["exp1"], "export_case_id": "nonexistent"}, + ) + + assert response.status_code == 404 + exp = Export.get(Export.id == "exp1") + assert exp.export_case_id is None + + def test_bulk_reassign_exports_non_admin_rejected(self): + """Non-admin users cannot bulk reassign.""" + Export.create( + id="exp1", + camera="front_door", + name="export_1", + date=100, + video_path="/tmp/exp1.mp4", + thumb_path="/tmp/exp1.jpg", + in_progress=False, + ) + + with AuthTestClient(self.app) as client: + response = client.post( + "/exports/reassign", + headers={"remote-user": "viewer", "remote-role": "viewer"}, + json={"ids": ["exp1"], "export_case_id": None}, + ) + + assert response.status_code == 403 diff --git a/frigate/test/test_chat_find_similar_objects.py b/frigate/test/test_chat_find_similar_objects.py new file mode 100644 index 000000000..38055658e --- /dev/null +++ b/frigate/test/test_chat_find_similar_objects.py @@ -0,0 +1,303 @@ +"""Tests for the find_similar_objects chat tool.""" + +import asyncio +import os +import tempfile +import unittest +from types import SimpleNamespace +from unittest.mock import MagicMock + +from playhouse.sqlite_ext import SqliteExtDatabase + +from frigate.api.chat import ( + _execute_find_similar_objects, + get_tool_definitions, +) +from frigate.api.chat_util import ( + DESCRIPTION_WEIGHT, + VISUAL_WEIGHT, + distance_to_score, + fuse_scores, +) +from frigate.embeddings.util import ZScoreNormalization +from frigate.models import Event + + +def _run(coro): + return asyncio.new_event_loop().run_until_complete(coro) + + +class TestDistanceToScore(unittest.TestCase): + def test_lower_distance_gives_higher_score(self): + stats = ZScoreNormalization() + # Seed the stats with a small distribution so stddev > 0. + stats._update([0.1, 0.2, 0.3, 0.4, 0.5]) + + close_score = distance_to_score(0.1, stats) + far_score = distance_to_score(0.5, stats) + + self.assertGreater(close_score, far_score) + self.assertGreaterEqual(close_score, 0.0) + self.assertLessEqual(close_score, 1.0) + self.assertGreaterEqual(far_score, 0.0) + self.assertLessEqual(far_score, 1.0) + + def test_uninitialized_stats_returns_neutral_score(self): + stats = ZScoreNormalization() # n == 0, stddev == 0 + self.assertEqual(distance_to_score(0.3, stats), 0.5) + + +class TestFuseScores(unittest.TestCase): + def test_weights_sum_to_one(self): + self.assertAlmostEqual(VISUAL_WEIGHT + DESCRIPTION_WEIGHT, 1.0) + + def test_fuses_both_sides(self): + fused = fuse_scores(visual_score=0.8, description_score=0.4) + expected = VISUAL_WEIGHT * 0.8 + DESCRIPTION_WEIGHT * 0.4 + self.assertAlmostEqual(fused, expected) + + def test_missing_description_uses_visual_only(self): + fused = fuse_scores(visual_score=0.7, description_score=None) + self.assertAlmostEqual(fused, 0.7) + + def test_missing_visual_uses_description_only(self): + fused = fuse_scores(visual_score=None, description_score=0.6) + self.assertAlmostEqual(fused, 0.6) + + def test_both_missing_returns_none(self): + self.assertIsNone(fuse_scores(visual_score=None, description_score=None)) + + +class TestToolDefinition(unittest.TestCase): + def test_find_similar_objects_is_registered(self): + tools = get_tool_definitions() + names = [t["function"]["name"] for t in tools] + self.assertIn("find_similar_objects", names) + + def test_find_similar_objects_schema(self): + tools = get_tool_definitions() + tool = next(t for t in tools if t["function"]["name"] == "find_similar_objects") + params = tool["function"]["parameters"]["properties"] + self.assertIn("event_id", params) + self.assertIn("after", params) + self.assertIn("before", params) + self.assertIn("cameras", params) + self.assertIn("labels", params) + self.assertIn("sub_labels", params) + self.assertIn("zones", params) + self.assertIn("similarity_mode", params) + self.assertIn("min_score", params) + self.assertIn("limit", params) + self.assertEqual(tool["function"]["parameters"]["required"], ["event_id"]) + self.assertEqual( + params["similarity_mode"]["enum"], ["visual", "semantic", "fused"] + ) + + +class TestExecuteFindSimilarObjects(unittest.TestCase): + def setUp(self): + self.tmp = tempfile.NamedTemporaryFile(suffix=".db", delete=False) + self.tmp.close() + self.db = SqliteExtDatabase(self.tmp.name) + Event.bind(self.db, bind_refs=False, bind_backrefs=False) + self.db.connect() + self.db.create_tables([Event]) + + # Insert an anchor plus two candidates. + def make(event_id, label="car", camera="driveway", start=1_700_000_100): + Event.create( + id=event_id, + label=label, + sub_label=None, + camera=camera, + start_time=start, + end_time=start + 10, + top_score=0.9, + score=0.9, + false_positive=False, + zones=[], + thumbnail="", + has_clip=True, + has_snapshot=True, + region=[0, 0, 1, 1], + box=[0, 0, 1, 1], + area=1, + retain_indefinitely=False, + ratio=1.0, + plus_id="", + model_hash="", + detector_type="", + model_type="", + data={"description": "a green sedan"}, + ) + + make("anchor", start=1_700_000_200) + make("cand_a", start=1_700_000_100) + make("cand_b", start=1_700_000_150) + self.make = make + + def tearDown(self): + self.db.close() + os.unlink(self.tmp.name) + + def _make_request(self, semantic_enabled=True, embeddings=None): + app = SimpleNamespace( + embeddings=embeddings, + frigate_config=SimpleNamespace( + semantic_search=SimpleNamespace(enabled=semantic_enabled), + ), + ) + return SimpleNamespace(app=app) + + def test_semantic_search_disabled_returns_error(self): + req = self._make_request(semantic_enabled=False) + result = _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor"}, + allowed_cameras=["driveway"], + ) + ) + self.assertEqual(result["error"], "semantic_search_disabled") + + def test_anchor_not_found_returns_error(self): + embeddings = MagicMock() + req = self._make_request(embeddings=embeddings) + result = _run( + _execute_find_similar_objects( + req, + {"event_id": "nope"}, + allowed_cameras=["driveway"], + ) + ) + self.assertEqual(result["error"], "anchor_not_found") + + def test_empty_candidates_returns_empty_results(self): + embeddings = MagicMock() + req = self._make_request(embeddings=embeddings) + # Filter to a camera with no other events. + result = _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor", "cameras": ["nonexistent_cam"]}, + allowed_cameras=["nonexistent_cam"], + ) + ) + self.assertEqual(result["results"], []) + self.assertFalse(result["candidate_truncated"]) + self.assertEqual(result["anchor"]["id"], "anchor") + + def test_fused_calls_both_searches_and_ranks(self): + embeddings = MagicMock() + # cand_a visually closer, cand_b semantically closer. + embeddings.search_thumbnail.return_value = [ + ("cand_a", 0.10), + ("cand_b", 0.40), + ] + embeddings.search_description.return_value = [ + ("cand_a", 0.50), + ("cand_b", 0.20), + ] + embeddings.thumb_stats = ZScoreNormalization() + embeddings.thumb_stats._update([0.1, 0.2, 0.3, 0.4, 0.5]) + embeddings.desc_stats = ZScoreNormalization() + embeddings.desc_stats._update([0.1, 0.2, 0.3, 0.4, 0.5]) + + req = self._make_request(embeddings=embeddings) + result = _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor"}, + allowed_cameras=["driveway"], + ) + ) + embeddings.search_thumbnail.assert_called_once() + embeddings.search_description.assert_called_once() + # cand_a should rank first because visual is weighted higher. + self.assertEqual(result["results"][0]["id"], "cand_a") + self.assertIn("score", result["results"][0]) + self.assertEqual(result["similarity_mode"], "fused") + + def test_visual_mode_only_calls_thumbnail(self): + embeddings = MagicMock() + embeddings.search_thumbnail.return_value = [("cand_a", 0.1)] + embeddings.thumb_stats = ZScoreNormalization() + embeddings.thumb_stats._update([0.1, 0.2, 0.3]) + + req = self._make_request(embeddings=embeddings) + _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor", "similarity_mode": "visual"}, + allowed_cameras=["driveway"], + ) + ) + embeddings.search_thumbnail.assert_called_once() + embeddings.search_description.assert_not_called() + + def test_semantic_mode_only_calls_description(self): + embeddings = MagicMock() + embeddings.search_description.return_value = [("cand_a", 0.1)] + embeddings.desc_stats = ZScoreNormalization() + embeddings.desc_stats._update([0.1, 0.2, 0.3]) + + req = self._make_request(embeddings=embeddings) + _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor", "similarity_mode": "semantic"}, + allowed_cameras=["driveway"], + ) + ) + embeddings.search_description.assert_called_once() + embeddings.search_thumbnail.assert_not_called() + + def test_min_score_drops_low_scoring_results(self): + embeddings = MagicMock() + embeddings.search_thumbnail.return_value = [ + ("cand_a", 0.10), + ("cand_b", 0.90), + ] + embeddings.search_description.return_value = [] + embeddings.thumb_stats = ZScoreNormalization() + embeddings.thumb_stats._update([0.1, 0.2, 0.3, 0.4, 0.5]) + embeddings.desc_stats = ZScoreNormalization() + + req = self._make_request(embeddings=embeddings) + result = _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor", "similarity_mode": "visual", "min_score": 0.6}, + allowed_cameras=["driveway"], + ) + ) + ids = [r["id"] for r in result["results"]] + self.assertIn("cand_a", ids) + self.assertNotIn("cand_b", ids) + + def test_labels_defaults_to_anchor_label(self): + self.make("person_a", label="person") + embeddings = MagicMock() + embeddings.search_thumbnail.return_value = [ + ("cand_a", 0.1), + ("cand_b", 0.2), + ] + embeddings.search_description.return_value = [] + embeddings.thumb_stats = ZScoreNormalization() + embeddings.thumb_stats._update([0.1, 0.2, 0.3]) + embeddings.desc_stats = ZScoreNormalization() + + req = self._make_request(embeddings=embeddings) + result = _run( + _execute_find_similar_objects( + req, + {"event_id": "anchor", "similarity_mode": "visual"}, + allowed_cameras=["driveway"], + ) + ) + ids = [r["id"] for r in result["results"]] + self.assertNotIn("person_a", ids) + + +if __name__ == "__main__": + unittest.main() diff --git a/frigate/test/test_deferred_processor.py b/frigate/test/test_deferred_processor.py new file mode 100644 index 000000000..c76b445fa --- /dev/null +++ b/frigate/test/test_deferred_processor.py @@ -0,0 +1,211 @@ +"""Tests for DeferredRealtimeProcessorApi.""" + +import sys +import time +import unittest +from typing import Any +from unittest.mock import MagicMock, patch + +import numpy as np + +from frigate.data_processing.real_time.api import DeferredRealtimeProcessorApi + +# Mock TFLite before importing classification module +_MOCK_MODULES = [ + "tflite_runtime", + "tflite_runtime.interpreter", + "ai_edge_litert", + "ai_edge_litert.interpreter", +] +for mod in _MOCK_MODULES: + if mod not in sys.modules: + sys.modules[mod] = MagicMock() + +from frigate.data_processing.real_time.custom_classification import ( # noqa: E402 + CustomObjectClassificationProcessor, +) + + +class StubDeferredProcessor(DeferredRealtimeProcessorApi): + """Minimal concrete subclass for testing the deferred base.""" + + def __init__(self, max_queue: int = 8): + config = MagicMock() + metrics = MagicMock() + super().__init__(config, metrics, max_queue=max_queue) + self.processed_items: list[tuple] = [] + + def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None: + """Enqueue every call — no gating logic in the stub.""" + self._enqueue_task(("frame", obj_data, frame.copy())) + + def _process_task(self, task: tuple) -> None: + kind = task[0] + if kind == "frame": + _, obj_data, frame = task + self.processed_items.append((obj_data["id"], frame.shape)) + self._emit_result( + { + "type": "test_result", + "id": obj_data["id"], + "label": "cat", + "score": 0.95, + } + ) + elif kind == "expire": + _, object_id = task + self.processed_items.append(("expired", object_id)) + + def handle_request( + self, topic: str, request_data: dict[str, Any] + ) -> dict[str, Any] | None: + if topic == "reload": + + def _do_reload(data): + return {"success": True, "model": data.get("name")} + + return self._enqueue_request(_do_reload, request_data) + return None + + def expire_object(self, object_id: str, camera: str) -> None: + self._enqueue_task(("expire", object_id)) + + +class TestDeferredProcessorBase(unittest.TestCase): + def test_enqueue_and_drain(self): + """Tasks enqueued on main thread are processed by worker, results are drainable.""" + proc = StubDeferredProcessor() + frame = np.zeros((100, 100, 3), dtype=np.uint8) + proc.process_frame({"id": "obj1"}, frame) + proc.process_frame({"id": "obj2"}, frame) + + # Give the worker time to process + time.sleep(0.1) + + results = proc.drain_results() + self.assertEqual(len(results), 2) + self.assertEqual(results[0]["id"], "obj1") + self.assertEqual(results[1]["id"], "obj2") + + # Second drain should be empty + self.assertEqual(len(proc.drain_results()), 0) + + def test_backpressure_drops_tasks(self): + """When queue is full, new tasks are silently dropped.""" + proc = StubDeferredProcessor(max_queue=2) + + frame = np.zeros((10, 10, 3), dtype=np.uint8) + for i in range(10): + proc.process_frame({"id": f"obj{i}"}, frame) + + time.sleep(0.2) + results = proc.drain_results() + # The key property: no crash, no unbounded growth + self.assertLessEqual(len(results), 10) + self.assertGreater(len(results), 0) + + def test_handle_request_through_worker(self): + """handle_request blocks until the worker processes it and returns a response.""" + proc = StubDeferredProcessor() + result = proc.handle_request("reload", {"name": "my_model"}) + self.assertEqual(result, {"success": True, "model": "my_model"}) + + def test_expire_object_serialized_with_work(self): + """expire_object goes through the queue, serialized with inference work.""" + proc = StubDeferredProcessor() + frame = np.zeros((10, 10, 3), dtype=np.uint8) + proc.process_frame({"id": "obj1"}, frame) + proc.expire_object("obj1", "front_door") + + time.sleep(0.1) + # Both should have been processed in order + self.assertEqual(len(proc.processed_items), 2) + self.assertEqual(proc.processed_items[0][0], "obj1") + self.assertEqual(proc.processed_items[1], ("expired", "obj1")) + + def test_shutdown_joins_worker(self): + """shutdown() signals the worker to stop and joins the thread.""" + proc = StubDeferredProcessor() + proc.shutdown() + self.assertFalse(proc._worker.is_alive()) + + def test_drain_results_returns_list(self): + """drain_results returns a plain list, not a deque.""" + proc = StubDeferredProcessor() + results = proc.drain_results() + self.assertIsInstance(results, list) + + +class TestCustomObjectClassificationDeferred(unittest.TestCase): + """Test that CustomObjectClassificationProcessor uses the deferred pattern correctly.""" + + def _make_processor(self): + config = MagicMock() + model_config = MagicMock() + model_config.name = "test_breed" + model_config.object_config = MagicMock() + model_config.object_config.objects = ["dog"] + model_config.threshold = 0.5 + model_config.save_attempts = 10 + model_config.object_config.classification_type = "sub_label" + publisher = MagicMock() + requestor = MagicMock() + metrics = MagicMock() + metrics.classification_speeds = {} + metrics.classification_cps = {} + + with patch.object( + CustomObjectClassificationProcessor, + "_CustomObjectClassificationProcessor__build_detector", + ): + proc = CustomObjectClassificationProcessor( + config, model_config, publisher, requestor, metrics + ) + proc.interpreter = None + proc.tensor_input_details = [{"index": 0}] + proc.tensor_output_details = [{"index": 0}] + proc.labelmap = {0: "labrador", 1: "poodle", 2: "none"} + return proc + + def test_is_deferred_processor(self): + """CustomObjectClassificationProcessor should be a DeferredRealtimeProcessorApi.""" + proc = self._make_processor() + self.assertIsInstance(proc, DeferredRealtimeProcessorApi) + + def test_expire_clears_history(self): + """expire_object should clear classification history for the object.""" + proc = self._make_processor() + proc.classification_history["obj1"] = [("labrador", 0.9, 1.0)] + + proc.expire_object("obj1", "front") + time.sleep(0.1) + + self.assertNotIn("obj1", proc.classification_history) + + def test_drain_results_empty_when_no_model(self): + """With no interpreter, process_frame saves training images but emits no results.""" + proc = self._make_processor() + proc.interpreter = None + + frame = np.zeros((150, 100), dtype=np.uint8) + obj_data = { + "id": "obj1", + "label": "dog", + "false_positive": False, + "end_time": None, + "box": [10, 10, 50, 50], + "camera": "front", + } + + with patch( + "frigate.data_processing.real_time.custom_classification.write_classification_attempt" + ): + proc.process_frame(obj_data, frame) + + time.sleep(0.1) + results = proc.drain_results() + self.assertEqual(len(results), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/frigate/util/object.py b/frigate/util/object.py index 7f38438f4..7c7edc10c 100644 --- a/frigate/util/object.py +++ b/frigate/util/object.py @@ -62,11 +62,12 @@ def get_camera_regions_grid( .where((Event.false_positive == None) | (Event.false_positive == False)) .where(Event.start_time > last_update) ) - valid_event_ids = [e["id"] for e in events.dicts()] - logger.debug(f"Found {len(valid_event_ids)} new events for {name}") + + event_count = events.count() + logger.debug(f"Found {event_count} new events for {name}") # no new events, return as is - if not valid_event_ids: + if event_count == 0: return grid new_update = datetime.datetime.now().timestamp() @@ -78,7 +79,7 @@ def get_camera_regions_grid( Timeline.data, ] ) - .where(Timeline.source_id << valid_event_ids) + .where(Timeline.source_id << events) .limit(10000) .dicts() ) diff --git a/web/e2e/fixtures/error-allowlist.ts b/web/e2e/fixtures/error-allowlist.ts new file mode 100644 index 000000000..4e6523bd0 --- /dev/null +++ b/web/e2e/fixtures/error-allowlist.ts @@ -0,0 +1,116 @@ +/** + * Global allowlist of regex patterns that the error collector ignores. + * + * Each entry MUST include a comment explaining what it silences and why. + * The allowlist is filtered at collection time, so failure messages list + * only unfiltered errors. + * + * Per-spec additions go through the `expectedErrors` test fixture parameter + * (see error-collector.ts), not by editing this file. That keeps allowlist + * drift visible per-PR rather than buried in shared infrastructure. + * + * NOTE ON CONSOLE vs REQUEST ERRORS: + * When a network request returns a 5xx response, the browser emits two + * events that the error collector captures: + * [request] "500 Internal Server Error " — from onResponse (URL included) + * [console] "Failed to load resource: ..." — from onConsole (URL NOT included) + * + * The request-level message includes the URL, so those patterns are specific. + * The console-level message text (from ConsoleMessage.text()) does NOT include + * the URL — the URL is stored separately in e.url. Therefore the console + * pattern for HTTP 500s cannot be URL-discriminated, and a single pattern + * covers all such browser echoes. This is safe because every such console + * error is already caught (and specifically matched) by its paired [request] + * entry below. + */ + +export const GLOBAL_ALLOWLIST: RegExp[] = [ + // ------------------------------------------------------------------------- + // Browser echo of HTTP 5xx responses (console mirror of [request] events). + // + // Whenever the browser receives a 5xx response it emits a console error: + // "Failed to load resource: the server responded with a status of 500 + // (Internal Server Error)" + // The URL is NOT part of ConsoleMessage.text() — it is stored separately. + // Every console error of this form is therefore paired with a specific + // [request] 500 entry below that names the exact endpoint. Allowlisting + // this pattern here silences the browser echo; the request-level entries + // enforce specificity. + // ------------------------------------------------------------------------- + /Failed to load resource: the server responded with a status of 500/, + + // ------------------------------------------------------------------------- + // Mock infrastructure gaps — API endpoints not yet covered by ApiMocker. + // + // These produce 500s because Vite's preview server has no handler for them. + // Each is a TODO(real-bug): the mock should be extended so these endpoints + // return sensible fixture data in tests. + // + // Only [request] patterns are listed here; the paired [console] mirror is + // covered by the "Failed to load resource" entry above. + // ------------------------------------------------------------------------- + + // TODO(real-bug): ApiMocker registers "**/api/reviews**" (plural) but the + // app fetches /api/review (singular) for the review list and timeline. + // Affects: review.spec.ts, navigation.spec.ts, live.spec.ts, auth.spec.ts. + // Fix: add route handlers for /api/review and /api/review/** in api-mocker.ts. + /500 Internal Server Error.*\/api\/review(\?|\/|$)/, + + // TODO(real-bug): /api/stats/history is not mocked; the system page fetches + // it for the detector/process history charts. + // Fix: add route handler for /api/stats/history in api-mocker.ts. + /500 Internal Server Error.*\/api\/stats\/history/, + + // TODO(real-bug): /api/event_ids is not mocked; the explore/search page + // fetches it to resolve event IDs for display. + // Fix: add route handler for /api/event_ids in api-mocker.ts. + /500 Internal Server Error.*\/api\/event_ids/, + + // TODO(real-bug): /api/sub_labels?split_joined=1 returns 500; the mock + // registers "**/api/sub_labels" which may not match when a query string is + // present, or route registration order causes the catch-all to win first. + // Fix: change the mock route to "**/api/sub_labels**" in api-mocker.ts. + /500 Internal Server Error.*\/api\/sub_labels/, + + // TODO(real-bug): MediaMocker handles /api/*/latest.jpg but the app also + // requests /api/*/latest.webp (webp format) for camera snapshots. + // Affects: live.spec.ts, review.spec.ts, auth.spec.ts, navigation.spec.ts. + // Fix: add route handler for /api/*/latest.webp in MediaMocker.install(). + /500 Internal Server Error.*\/api\/[^/]+\/latest\.webp/, + /failed: net::ERR_ABORTED.*\/api\/[^/]+\/latest\.webp/, + + // ------------------------------------------------------------------------- + // Mock infrastructure gap — WebSocket streams. + // + // Playwright's page.route() does not intercept WebSocket connections. + // The jsmpeg live-stream WS connections to /live/jsmpeg/* always fail + // with a 500 handshake error because the Vite preview server has no WS + // handler. TODO(real-bug): add WsMocker support for jsmpeg WebSocket + // connections, or suppress the connection attempt in the test environment. + // Affects: live.spec.ts (single camera view), auth.spec.ts. + // ------------------------------------------------------------------------- + /WebSocket connection to '.*\/live\/jsmpeg\/.*' failed/, + + // ------------------------------------------------------------------------- + // Benign — lazy-loaded chunk aborts during navigation. + // + // When a test navigates away from a page while the browser is still + // fetching lazily-split JS/CSS asset chunks, the in-flight fetch is + // cancelled (net::ERR_ABORTED). This is normal browser behaviour on + // navigation and does not indicate a real error; the assets load fine + // on a stable connection. + // ------------------------------------------------------------------------- + /failed: net::ERR_ABORTED.*\/assets\//, + + // ------------------------------------------------------------------------- + // Real app bug — Radix UI DialogContent missing accessible title. + // + // TODO(real-bug): A dialog somewhere in the app renders + // without a , violating Radix UI's accessibility contract. + // The warning originates from the bundled main-*.js. Investigate which + // dialog component is missing the title and add a VisuallyHidden DialogTitle. + // Likely candidate: face-library or search-detail dialog in explore page. + // See: https://radix-ui.com/primitives/docs/components/dialog + // ------------------------------------------------------------------------- + /`DialogContent` requires a `DialogTitle`/, +]; diff --git a/web/e2e/fixtures/error-collector.ts b/web/e2e/fixtures/error-collector.ts new file mode 100644 index 000000000..7cba52664 --- /dev/null +++ b/web/e2e/fixtures/error-collector.ts @@ -0,0 +1,122 @@ +/** + * Collects console errors, page errors, and failed network requests + * during a Playwright test, with regex-based allowlist filtering. + * + * Usage: + * const collector = installErrorCollector(page, [...GLOBAL_ALLOWLIST]); + * // ... run test ... + * collector.assertClean(); // throws if any non-allowlisted error + * + * The collector is wired into the `frigateApp` fixture so every test + * gets it for free. Tests that intentionally trigger an error pass + * additional regexes via the `expectedErrors` fixture parameter. + */ + +import type { Page, Request, Response, ConsoleMessage } from "@playwright/test"; + +export type CollectedError = { + kind: "console" | "pageerror" | "request"; + message: string; + url?: string; + stack?: string; +}; + +export type ErrorCollector = { + errors: CollectedError[]; + assertClean(): void; +}; + +function isAllowlisted(message: string, allowlist: RegExp[]): boolean { + return allowlist.some((pattern) => pattern.test(message)); +} + +function firstStackFrame(stack: string | undefined): string | undefined { + if (!stack) return undefined; + const lines = stack + .split("\n") + .map((l) => l.trim()) + .filter(Boolean); + // Skip the error message line (line 0); return the first "at ..." frame + return lines.find((l) => l.startsWith("at ")); +} + +function isSameOrigin(url: string, baseURL: string | undefined): boolean { + if (!baseURL) return true; + try { + return new URL(url).origin === new URL(baseURL).origin; + } catch { + return false; + } +} + +export function installErrorCollector( + page: Page, + allowlist: RegExp[], +): ErrorCollector { + const errors: CollectedError[] = []; + const baseURL = ( + page.context() as unknown as { _options?: { baseURL?: string } } + )._options?.baseURL; + + const onConsole = (msg: ConsoleMessage) => { + if (msg.type() !== "error") return; + const text = msg.text(); + if (isAllowlisted(text, allowlist)) return; + errors.push({ + kind: "console", + message: text, + url: msg.location().url, + }); + }; + + const onPageError = (err: Error) => { + const text = err.message; + if (isAllowlisted(text, allowlist)) return; + errors.push({ + kind: "pageerror", + message: text, + stack: firstStackFrame(err.stack), + }); + }; + + const onResponse = (response: Response) => { + const status = response.status(); + if (status < 500) return; + const url = response.url(); + if (!isSameOrigin(url, baseURL)) return; + const text = `${status} ${response.statusText()} ${url}`; + if (isAllowlisted(text, allowlist)) return; + errors.push({ kind: "request", message: text, url }); + }; + + const onRequestFailed = (request: Request) => { + const url = request.url(); + if (!isSameOrigin(url, baseURL)) return; + const failure = request.failure(); + const text = `failed: ${failure?.errorText ?? "unknown"} ${url}`; + if (isAllowlisted(text, allowlist)) return; + errors.push({ kind: "request", message: text, url }); + }; + + page.on("console", onConsole); + page.on("pageerror", onPageError); + page.on("response", onResponse); + page.on("requestfailed", onRequestFailed); + + return { + errors, + assertClean() { + if (errors.length === 0) return; + const formatted = errors + .map((e, i) => { + const stack = e.stack ? `\n ${e.stack}` : ""; + const url = e.url && e.url !== e.message ? ` (${e.url})` : ""; + return ` ${i + 1}. [${e.kind}] ${e.message}${url}${stack}`; + }) + .join("\n"); + throw new Error( + `Page emitted ${errors.length} unexpected error${errors.length === 1 ? "" : "s"}:\n${formatted}`, + ); + }, + }; +} diff --git a/web/e2e/fixtures/frigate-test.ts b/web/e2e/fixtures/frigate-test.ts index 88a2945d7..bc28ab50c 100644 --- a/web/e2e/fixtures/frigate-test.ts +++ b/web/e2e/fixtures/frigate-test.ts @@ -6,6 +6,11 @@ * @playwright/test directly. The `frigateApp` fixture provides a * fully mocked Frigate frontend ready for interaction. * + * The fixture also installs the error collector (see error-collector.ts). + * Any console error, page error, or same-origin failed request that is + * not on the global allowlist or the test's `expectedErrors` list will + * fail the test in the fixture's teardown. + * * CRITICAL: All route/WS handlers are registered before page.goto() * to prevent AuthProvider from redirecting to login.html. */ @@ -17,6 +22,8 @@ import { type ApiMockOverrides, } from "../helpers/api-mocker"; import { WsMocker } from "../helpers/ws-mocker"; +import { installErrorCollector, type ErrorCollector } from "./error-collector"; +import { GLOBAL_ALLOWLIST } from "./error-allowlist"; export class FrigateApp { public api: ApiMocker; @@ -67,10 +74,43 @@ export class FrigateApp { type FrigateFixtures = { frigateApp: FrigateApp; + /** + * Per-test additional allowlist regex patterns. Tests that intentionally + * trigger errors (e.g. error-state tests that hit a mocked 500) declare + * their expected errors here so the collector ignores them. + * + * Default is `[]` — most tests should not need this. + */ + expectedErrors: RegExp[]; + errorCollector: ErrorCollector; }; export const test = base.extend({ - frigateApp: async ({ page }, use, testInfo) => { + expectedErrors: [[], { option: true }], + + errorCollector: async ({ page, expectedErrors }, use, testInfo) => { + const collector = installErrorCollector(page, [ + ...GLOBAL_ALLOWLIST, + ...expectedErrors, + ]); + await use(collector); + if (process.env.E2E_STRICT_ERRORS === "1") { + collector.assertClean(); + } else if (collector.errors.length > 0) { + // Soft mode: attach errors to the test report so they're visible + // without failing the run. + await testInfo.attach("collected-errors.txt", { + body: collector.errors + .map((e) => `[${e.kind}] ${e.message}${e.url ? ` (${e.url})` : ""}`) + .join("\n"), + contentType: "text/plain", + }); + } + }, + + frigateApp: async ({ page, errorCollector }, use, testInfo) => { + // Reference the collector so its `use()` runs and teardown fires + void errorCollector; const app = new FrigateApp(page, testInfo.project.name); await app.installDefaults(); await use(app); diff --git a/web/e2e/helpers/api-mocker.ts b/web/e2e/helpers/api-mocker.ts index 5de4ba86c..52f10d64b 100644 --- a/web/e2e/helpers/api-mocker.ts +++ b/web/e2e/helpers/api-mocker.ts @@ -82,14 +82,26 @@ export class ApiMocker { route.fulfill({ json: stats }), ); - // Reviews - await this.page.route("**/api/reviews**", (route) => { - const url = route.request().url(); - if (url.includes("summary")) { - return route.fulfill({ json: reviewSummary }); - } - return route.fulfill({ json: reviews }); - }); + // Reviews. The real backend exposes /review (singular) for the main + // list and /review/summary for the summary — the previous plural glob + // (**/api/reviews**) never matched either endpoint, so review-dependent + // tests silently ran without data. The POST mutations at /reviews/viewed + // and /reviews/delete (plural) still fall through to the generic + // mutation catch-all further down the file. + await this.page.route(/\/api\/review\/summary/, (route) => + route.fulfill({ json: reviewSummary }), + ); + await this.page.route(/\/api\/review(\?|$)/, (route) => + route.fulfill({ json: reviews }), + ); + + // Export jobs. The Exports page polls this every 2s while any export + // is in_progress; without a mock route it falls through to the preview + // server which returns 500 and makes the page flap between loading and + // rendered state, breaking tests that navigate to /export. + await this.page.route("**/api/jobs/export", (route) => + route.fulfill({ json: [] }), + ); // Recordings summary await this.page.route("**/api/recordings/summary**", (route) => diff --git a/web/e2e/helpers/mock-overrides.ts b/web/e2e/helpers/mock-overrides.ts new file mode 100644 index 000000000..71ea38e38 --- /dev/null +++ b/web/e2e/helpers/mock-overrides.ts @@ -0,0 +1,56 @@ +/** + * Per-test mock overrides for driving empty / loading / error states. + * + * Playwright route handlers are LIFO: the most recently registered handler + * matching a URL takes precedence. The frigateApp fixture installs default + * mocks before the test body runs, so these helpers — called inside the + * test body — register AFTER the defaults and therefore win. + * + * Always call these BEFORE the navigation that triggers the request. + * + * Example: + * await mockEmpty(page, "**\/api\/exports**"); + * await frigateApp.goto("/export"); + * // Page now renders the empty state + */ + +import type { Page } from "@playwright/test"; + +/** Return an empty array for the matched endpoint. */ +export async function mockEmpty( + page: Page, + urlPattern: string | RegExp, +): Promise { + await page.route(urlPattern, (route) => route.fulfill({ json: [] })); +} + +/** Return an HTTP error for the matched endpoint. Default status 500. */ +export async function mockError( + page: Page, + urlPattern: string | RegExp, + status = 500, +): Promise { + await page.route(urlPattern, (route) => + route.fulfill({ + status, + json: { success: false, message: "Mocked error" }, + }), + ); +} + +/** + * Delay the response by `ms` milliseconds before fulfilling with the + * provided body. Use to assert loading-state UI is visible during the + * delay window. + */ +export async function mockDelay( + page: Page, + urlPattern: string | RegExp, + ms: number, + body: unknown = [], +): Promise { + await page.route(urlPattern, async (route) => { + await new Promise((resolve) => setTimeout(resolve, ms)); + await route.fulfill({ json: body }); + }); +} diff --git a/web/e2e/pages/base.page.ts b/web/e2e/pages/base.page.ts index 4362f786f..e5628cb8c 100644 --- a/web/e2e/pages/base.page.ts +++ b/web/e2e/pages/base.page.ts @@ -79,4 +79,57 @@ export class BasePage { async waitForPageLoad() { await this.page.waitForSelector("#pageRoot", { timeout: 10_000 }); } + + /** + * Open the mobile-only export pane / sheet that slides up from the + * bottom on the export page. No-op on desktop. Returns the pane locator + * so the caller can assert against its contents. + */ + async openMobilePane(): Promise { + if (this.isDesktop) { + // Return the desktop equivalent (the main content area itself) + return this.pageRoot; + } + // Look for any element that opens a sheet/dialog on tap. + // Specific views override this with their own selector. + const pane = this.page.locator('[role="dialog"]').first(); + return pane; + } + + /** + * Open a side drawer (e.g. mobile filter drawer). View-specific page + * objects should override this with their actual trigger selector. + * The default implementation looks for a button labelled "Open menu" + * or "Filters" and clicks it, then returns the drawer locator. + */ + async openDrawer(): Promise { + if (this.isDesktop) { + return this.pageRoot; + } + const trigger = this.page + .getByRole("button", { name: /menu|filter/i }) + .first(); + if (await trigger.count()) { + await trigger.click(); + } + return this.page.locator('[role="dialog"], [data-state="open"]').first(); + } + + /** + * Open a bottom sheet (vaul). View-specific page objects should + * override this with their actual trigger selector. + */ + async openBottomSheet(): Promise { + if (this.isDesktop) { + return this.pageRoot; + } + return this.page.locator("[vaul-drawer]").first(); + } + + /** Close any currently-open mobile overlay (drawer, sheet, dialog). */ + async closeMobileOverlay(): Promise { + if (this.isDesktop) return; + // Press Escape — Radix dialogs and vaul both close on Escape + await this.page.keyboard.press("Escape"); + } } diff --git a/web/e2e/scripts/lint-specs.mjs b/web/e2e/scripts/lint-specs.mjs new file mode 100644 index 000000000..4724e99bb --- /dev/null +++ b/web/e2e/scripts/lint-specs.mjs @@ -0,0 +1,160 @@ +#!/usr/bin/env node +/** + * Lint script for e2e specs. Bans lenient test patterns and requires + * a @mobile-tagged test in every spec under specs/ (excluding _meta/). + * + * Banned patterns: + * - page.waitForTimeout( — use expect().toPass() or waitFor instead + * - if (await ... .isVisible()) — assertions must be unconditional + * - if ((await ... .count()) > 0) — same as above + * - expect(... .length).toBeGreaterThan(0) on textContent results + * + * Escape hatch: append `// e2e-lint-allow` on any line to silence the + * check for that line. Use sparingly and explain why in a comment above. + * + * @mobile rule: every .spec.ts under specs/ (not specs/_meta/) must + * contain at least one test title or describe with the substring "@mobile". + * + * Specs in PENDING_REWRITE are exempt from all rules until they are + * rewritten with proper assertions and mobile coverage. Remove each + * entry when its spec is updated. + */ + +import { readFileSync, readdirSync, statSync } from "node:fs"; +import { join, relative, resolve, dirname } from "node:path"; +import { fileURLToPath } from "node:url"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const SPECS_DIR = resolve(__dirname, "..", "specs"); +const META_PREFIX = resolve(SPECS_DIR, "_meta"); + +// Specs exempt from lint rules until they are rewritten with proper +// assertions and mobile coverage. Remove each entry when its spec is updated. +const PENDING_REWRITE = new Set([ + "auth.spec.ts", + "chat.spec.ts", + "classification.spec.ts", + "config-editor.spec.ts", + "explore.spec.ts", + "export.spec.ts", + "face-library.spec.ts", + "live.spec.ts", + "logs.spec.ts", + "navigation.spec.ts", + "replay.spec.ts", + "review.spec.ts", + "system.spec.ts", +]); + +const BANNED_PATTERNS = [ + { + name: "page.waitForTimeout", + regex: /\bwaitForTimeout\s*\(/, + advice: + "Use expect.poll(), expect(...).toPass(), or waitFor() with a real condition.", + }, + { + name: "conditional isVisible() assertion", + regex: /\bif\s*\(\s*await\s+[^)]*\.isVisible\s*\(/, + advice: + "Assertions must be unconditional. Use expect(...).toBeVisible() instead.", + }, + { + name: "conditional count() assertion", + regex: /\bif\s*\(\s*\(?\s*await\s+[^)]*\.count\s*\(\s*\)\s*\)?\s*[><=!]/, + advice: + "Assertions must be unconditional. Use expect(...).toHaveCount(n).", + }, + { + name: "vacuous textContent length assertion", + regex: /expect\([^)]*\.length\)\.toBeGreaterThan\(0\)/, + advice: + "Assert specific content, not that some text exists.", + }, +]; + +function walk(dir) { + const entries = readdirSync(dir); + const out = []; + for (const entry of entries) { + const full = join(dir, entry); + const st = statSync(full); + if (st.isDirectory()) { + out.push(...walk(full)); + } else if (entry.endsWith(".spec.ts")) { + out.push(full); + } + } + return out; +} + +function lintFile(file) { + const basename = file.split("/").pop(); + if (PENDING_REWRITE.has(basename)) return []; + if (file.includes("/specs/settings/")) return []; + + const errors = []; + const text = readFileSync(file, "utf8"); + const lines = text.split("\n"); + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + if (line.includes("e2e-lint-allow")) continue; + for (const pat of BANNED_PATTERNS) { + if (pat.regex.test(line)) { + errors.push({ + file, + line: i + 1, + col: 1, + rule: pat.name, + message: `${pat.name}: ${pat.advice}`, + source: line.trim(), + }); + } + } + } + + // @mobile rule: skip _meta + const isMeta = file.startsWith(META_PREFIX); + if (!isMeta) { + if (!/@mobile\b/.test(text)) { + errors.push({ + file, + line: 1, + col: 1, + rule: "missing @mobile test", + message: + 'Spec must contain at least one test or describe tagged with "@mobile".', + source: "", + }); + } + } + + return errors; +} + +function main() { + const files = walk(SPECS_DIR); + const allErrors = []; + for (const f of files) { + allErrors.push(...lintFile(f)); + } + + if (allErrors.length === 0) { + console.log(`e2e:lint: ${files.length} spec files OK`); + process.exit(0); + } + + for (const err of allErrors) { + const rel = relative(process.cwd(), err.file); + console.error(`${rel}:${err.line}:${err.col} ${err.rule}`); + console.error(` ${err.message}`); + if (err.source) console.error(` > ${err.source}`); + } + console.error( + `\ne2e:lint: ${allErrors.length} error${allErrors.length === 1 ? "" : "s"} in ${files.length} files`, + ); + process.exit(1); +} + +main(); diff --git a/web/e2e/specs/_meta/error-collector.spec.ts b/web/e2e/specs/_meta/error-collector.spec.ts new file mode 100644 index 000000000..7a888d4b2 --- /dev/null +++ b/web/e2e/specs/_meta/error-collector.spec.ts @@ -0,0 +1,112 @@ +/** + * Self-tests for the error collector fixture itself. + * + * These guard against future regressions in the safety net. Each test + * deliberately triggers (or avoids triggering) an error to verify the + * collector behaves correctly. Tests that expect to fail use the + * `expectedErrors` fixture parameter to allowlist their own errors. + */ + +import { test, expect } from "../../fixtures/frigate-test"; + +// test.use applies to a whole describe block in Playwright, so each test +// that needs a custom allowlist gets its own describe. + +test.describe("Error Collector — clean @meta", () => { + test("clean page passes", async ({ frigateApp }) => { + await frigateApp.goto("/"); + // No errors triggered. The fixture teardown should not throw. + }); +}); + +test.describe("Error Collector — unallowlisted console error fails @meta", () => { + test("console.error fails the test when not allowlisted", async ({ + page, + frigateApp, + }) => { + test.skip( + process.env.E2E_STRICT_ERRORS !== "1", + "Requires E2E_STRICT_ERRORS=1 to assert failure", + ); + test.fail(); // We expect the fixture teardown to throw + await frigateApp.goto("/"); + await page.evaluate(() => { + // eslint-disable-next-line no-console + console.error("UNEXPECTED_DELIBERATE_TEST_ERROR_xyz123"); + }); + }); +}); + +test.describe("Error Collector — allowlisted console error passes @meta", () => { + test.use({ expectedErrors: [/ALLOWED_DELIBERATE_TEST_ERROR_xyz123/] }); + + test("console.error is silenced when allowlisted via expectedErrors", async ({ + page, + frigateApp, + }) => { + await frigateApp.goto("/"); + await page.evaluate(() => { + // eslint-disable-next-line no-console + console.error("ALLOWED_DELIBERATE_TEST_ERROR_xyz123"); + }); + }); +}); + +test.describe("Error Collector — uncaught pageerror fails @meta", () => { + test("uncaught pageerror fails the test", async ({ page, frigateApp }) => { + test.skip( + process.env.E2E_STRICT_ERRORS !== "1", + "Requires E2E_STRICT_ERRORS=1 to assert failure", + ); + test.fail(); + await frigateApp.goto("/"); + await page.evaluate(() => { + setTimeout(() => { + throw new Error("UNCAUGHT_DELIBERATE_TEST_ERROR_xyz789"); + }, 0); + }); + // Wait a frame to let the throw propagate before fixture teardown. + // The marker below silences the e2e:lint banned-pattern check on this line. + await page.waitForTimeout(100); // e2e-lint-allow: deliberate; need to await async throw + }); +}); + +test.describe("Error Collector — 5xx fails @meta", () => { + test("same-origin 5xx response fails the test", async ({ + page, + frigateApp, + }) => { + test.skip( + process.env.E2E_STRICT_ERRORS !== "1", + "Requires E2E_STRICT_ERRORS=1 to assert failure", + ); + test.fail(); + await page.route("**/api/version", (route) => + route.fulfill({ status: 500, body: "boom" }), + ); + await frigateApp.goto("/"); + await page.evaluate(() => fetch("/api/version").catch(() => {})); + // Give the response listener a microtask to fire + await expect.poll(async () => true).toBe(true); + }); +}); + +test.describe("Error Collector — allowlisted 5xx passes @meta", () => { + // Use a single alternation regex so test.use() receives a 1-element array. + // Playwright's isFixtureTuple() treats any [value, object] pair as a fixture + // tuple, so a 2-element array whose second item is a RegExp would be + // misinterpreted as [defaultValue, options]. Both the request collector + // error ("500 … /api/version") and the browser console error + // ("Failed to load resource … 500") are matched by the alternation below. + test.use({ + expectedErrors: [/500.*\/api\/version|Failed to load resource.*500/], + }); + + test("allowlisted 5xx passes", async ({ page, frigateApp }) => { + await page.route("**/api/version", (route) => + route.fulfill({ status: 500, body: "boom" }), + ); + await frigateApp.goto("/"); + await page.evaluate(() => fetch("/api/version").catch(() => {})); + }); +}); diff --git a/web/e2e/specs/_meta/mock-overrides.spec.ts b/web/e2e/specs/_meta/mock-overrides.spec.ts new file mode 100644 index 000000000..f3c1ae3df --- /dev/null +++ b/web/e2e/specs/_meta/mock-overrides.spec.ts @@ -0,0 +1,73 @@ +/** + * Self-tests for the mock override helpers. Verifies each helper + * intercepts the matched URL and returns the expected payload/status. + */ + +import { test, expect } from "../../fixtures/frigate-test"; +import { mockEmpty, mockError, mockDelay } from "../../helpers/mock-overrides"; + +test.describe("Mock Overrides — empty @meta", () => { + test("mockEmpty returns []", async ({ page, frigateApp }) => { + await mockEmpty(page, "**/api/__meta_test__"); + await frigateApp.goto("/"); + const result = await page.evaluate(async () => { + const r = await fetch("/api/__meta_test__"); + return { status: r.status, body: await r.json() }; + }); + expect(result.status).toBe(200); + expect(result.body).toEqual([]); + }); +}); + +test.describe("Mock Overrides — error default @meta", () => { + // Match both the collected request error and the browser's console echo. + // Using a single alternation regex avoids Playwright's isFixtureTuple + // collision with multi-element RegExp arrays. + test.use({ + expectedErrors: [/500.*__meta_test__|Failed to load resource.*500/], + }); + + test("mockError returns 500 by default", async ({ page, frigateApp }) => { + await mockError(page, "**/api/__meta_test__"); + await frigateApp.goto("/"); + const status = await page.evaluate(async () => { + const r = await fetch("/api/__meta_test__"); + return r.status; + }); + expect(status).toBe(500); + }); +}); + +test.describe("Mock Overrides — error custom status @meta", () => { + // The browser emits a "Failed to load resource" console.error for 404s, + // which the error collector catches even though 404 is not a 5xx. + test.use({ + expectedErrors: [/Failed to load resource.*404|404.*__meta_test_404__/], + }); + + test("mockError accepts a custom status", async ({ page, frigateApp }) => { + await mockError(page, "**/api/__meta_test_404__", 404); + await frigateApp.goto("/"); + const status = await page.evaluate(async () => { + const r = await fetch("/api/__meta_test_404__"); + return r.status; + }); + expect(status).toBe(404); + }); +}); + +test.describe("Mock Overrides — delay @meta", () => { + test("mockDelay delays response by the requested ms", async ({ + page, + frigateApp, + }) => { + await mockDelay(page, "**/api/__meta_test_delay__", 300, ["delayed"]); + await frigateApp.goto("/"); + const elapsed = await page.evaluate(async () => { + const start = performance.now(); + await fetch("/api/__meta_test_delay__"); + return performance.now() - start; + }); + expect(elapsed).toBeGreaterThanOrEqual(250); + }); +}); diff --git a/web/e2e/specs/export.spec.ts b/web/e2e/specs/export.spec.ts index 07454231a..605e2dca4 100644 --- a/web/e2e/specs/export.spec.ts +++ b/web/e2e/specs/export.spec.ts @@ -1,74 +1,734 @@ -/** - * Export page tests -- HIGH tier. - * - * Tests export card rendering with mock data, search filtering, - * and delete confirmation dialog. - */ - import { test, expect } from "../fixtures/frigate-test"; -test.describe("Export Page - Cards @high", () => { - test("export page renders export cards from mock data", async ({ +test.describe("Export Page - Overview @high", () => { + test("renders uncategorized exports and case cards from mock data", async ({ frigateApp, }) => { await frigateApp.goto("/export"); - await frigateApp.page.waitForTimeout(2000); - // Should show export names from our mock data + await expect( frigateApp.page.getByText("Front Door - Person Alert"), - ).toBeVisible({ timeout: 10_000 }); + ).toBeVisible(); await expect( - frigateApp.page.getByText("Backyard - Car Detection"), + frigateApp.page.getByText("Garage - In Progress"), + ).toBeVisible(); + await expect( + frigateApp.page.getByText("Package Theft Investigation"), ).toBeVisible(); }); - test("export page shows in-progress indicator", async ({ frigateApp }) => { + test("search filters uncategorized exports", async ({ frigateApp }) => { await frigateApp.goto("/export"); - await frigateApp.page.waitForTimeout(2000); - // "Garage - In Progress" export should be visible - await expect(frigateApp.page.getByText("Garage - In Progress")).toBeVisible( - { timeout: 10_000 }, - ); + + const searchInput = frigateApp.page.getByPlaceholder(/search/i).first(); + await searchInput.fill("Front Door"); + + await expect( + frigateApp.page.getByText("Front Door - Person Alert"), + ).toBeVisible(); + await expect( + frigateApp.page.getByText("Backyard - Car Detection"), + ).toBeHidden(); + await expect( + frigateApp.page.getByText("Garage - In Progress"), + ).toBeHidden(); }); - test("export page shows case grouping", async ({ frigateApp }) => { + test("new case button opens the create case dialog", async ({ + frigateApp, + }) => { await frigateApp.goto("/export"); - await frigateApp.page.waitForTimeout(3000); - // Cases may render differently depending on API response shape - const pageText = await frigateApp.page.textContent("#pageRoot"); - expect(pageText?.length).toBeGreaterThan(0); + + await frigateApp.page.getByRole("button", { name: "New Case" }).click(); + + await expect( + frigateApp.page.getByRole("dialog").filter({ hasText: "Create Case" }), + ).toBeVisible(); + await expect(frigateApp.page.getByPlaceholder("Case name")).toBeVisible(); }); }); -test.describe("Export Page - Search @high", () => { - test("search input filters export list", async ({ frigateApp }) => { +test.describe("Export Page - Case Detail @high", () => { + test("opening a case shows its detail view and associated export", async ({ + frigateApp, + }) => { await frigateApp.goto("/export"); - await frigateApp.page.waitForTimeout(2000); - const searchInput = frigateApp.page.locator( - '#pageRoot input[type="text"], #pageRoot input', + + await frigateApp.page + .getByText("Package Theft Investigation") + .first() + .click(); + + await expect( + frigateApp.page.getByRole("heading", { + name: "Package Theft Investigation", + }), + ).toBeVisible(); + await expect( + frigateApp.page.getByText("Backyard - Car Detection"), + ).toBeVisible(); + await expect( + frigateApp.page.getByRole("button", { name: "Add Export" }), + ).toBeVisible(); + await expect( + frigateApp.page.getByRole("button", { name: "Edit Case" }), + ).toBeVisible(); + await expect( + frigateApp.page.getByRole("button", { name: "Delete Case" }), + ).toBeVisible(); + }); + + test("edit case opens a prefilled dialog", async ({ frigateApp }) => { + await frigateApp.goto("/export"); + + await frigateApp.page + .getByText("Package Theft Investigation") + .first() + .click(); + await frigateApp.page.getByRole("button", { name: "Edit Case" }).click(); + + const dialog = frigateApp.page + .getByRole("dialog") + .filter({ hasText: "Edit Case" }); + await expect(dialog).toBeVisible(); + await expect(dialog.locator("input")).toHaveValue( + "Package Theft Investigation", ); - if ( - (await searchInput.count()) > 0 && - (await searchInput.first().isVisible()) - ) { - // Type a search term that matches one export - await searchInput.first().fill("Front Door"); - await frigateApp.page.waitForTimeout(500); - // "Front Door - Person Alert" should still be visible - await expect( - frigateApp.page.getByText("Front Door - Person Alert"), - ).toBeVisible(); + await expect(dialog.locator("textarea")).toHaveValue( + "Review of suspicious activity near the front porch", + ); + }); + + test("add export shows completed uncategorized exports for assignment", async ({ + frigateApp, + }) => { + await frigateApp.goto("/export"); + + await frigateApp.page + .getByText("Package Theft Investigation") + .first() + .click(); + await frigateApp.page.getByRole("button", { name: "Add Export" }).click(); + + const dialog = frigateApp.page + .getByRole("dialog") + .filter({ hasText: "Add Export to Package Theft Investigation" }); + await expect(dialog).toBeVisible(); + // Completed, uncategorized exports are selectable + await expect(dialog.getByText("Front Door - Person Alert")).toBeVisible(); + // In-progress exports are intentionally hidden by AssignExportDialog + // (see Exports.tsx filteredExports) — they can't be assigned until + // they finish, so they should not show in the picker. + await expect(dialog.getByText("Garage - In Progress")).toBeHidden(); + }); + + test("delete case opens a confirmation dialog", async ({ frigateApp }) => { + await frigateApp.goto("/export"); + + await frigateApp.page + .getByText("Package Theft Investigation") + .first() + .click(); + await frigateApp.page.getByRole("button", { name: "Delete Case" }).click(); + + const dialog = frigateApp.page + .getByRole("alertdialog") + .filter({ hasText: "Delete Case" }); + await expect(dialog).toBeVisible(); + await expect(dialog.getByText(/Package Theft Investigation/)).toBeVisible(); + }); + + test("delete case can also delete its exports", async ({ frigateApp }) => { + let deleteRequestUrl: string | null = null; + let deleteCaseCompleted = false; + + const initialCases = [ + { + id: "case-001", + name: "Package Theft Investigation", + description: "Review of suspicious activity near the front porch", + created_at: 1775407931.3863528, + updated_at: 1775483531.3863528, + }, + ]; + + const initialExports = [ + { + id: "export-001", + camera: "front_door", + name: "Front Door - Person Alert", + date: 1775490731.3863528, + video_path: "/exports/export-001.mp4", + thumb_path: "/exports/export-001-thumb.jpg", + in_progress: false, + export_case_id: null, + }, + { + id: "export-002", + camera: "backyard", + name: "Backyard - Car Detection", + date: 1775483531.3863528, + video_path: "/exports/export-002.mp4", + thumb_path: "/exports/export-002-thumb.jpg", + in_progress: false, + export_case_id: "case-001", + }, + { + id: "export-003", + camera: "garage", + name: "Garage - In Progress", + date: 1775492531.3863528, + video_path: "/exports/export-003.mp4", + thumb_path: "/exports/export-003-thumb.jpg", + in_progress: true, + export_case_id: null, + }, + ]; + + await frigateApp.page.route(/\/api\/cases(?:$|\?|\/)/, async (route) => { + const request = route.request(); + + if (request.method() === "DELETE") { + deleteRequestUrl = request.url(); + deleteCaseCompleted = true; + return route.fulfill({ json: { success: true } }); + } + + if (request.method() === "GET") { + return route.fulfill({ + json: deleteCaseCompleted ? [] : initialCases, + }); + } + + return route.fallback(); + }); + + await frigateApp.page.route("**/api/exports**", async (route) => { + if (route.request().method() !== "GET") { + return route.fallback(); + } + + return route.fulfill({ + json: deleteCaseCompleted + ? initialExports.filter((exp) => exp.export_case_id !== "case-001") + : initialExports, + }); + }); + + await frigateApp.goto("/export"); + + await frigateApp.page + .getByText("Package Theft Investigation") + .first() + .click(); + await frigateApp.page.getByRole("button", { name: "Delete Case" }).click(); + + const dialog = frigateApp.page + .getByRole("alertdialog") + .filter({ hasText: "Delete Case" }); + await expect(dialog).toBeVisible(); + + const deleteExportsSwitch = dialog.getByRole("switch", { + name: "Also delete exports", + }); + await expect(deleteExportsSwitch).toHaveAttribute("aria-checked", "false"); + await expect( + dialog.getByText( + "Exports will remain available as uncategorized exports.", + ), + ).toBeVisible(); + + await deleteExportsSwitch.click(); + + await expect(deleteExportsSwitch).toHaveAttribute("aria-checked", "true"); + await expect( + dialog.getByText("All exports in this case will be permanently deleted."), + ).toBeVisible(); + + await dialog.getByRole("button", { name: /^delete$/i }).click(); + + await expect + .poll(() => deleteRequestUrl) + .toContain("/api/cases/case-001?delete_exports=true"); + + await expect(dialog).toBeHidden(); + await expect( + frigateApp.page.getByRole("heading", { + name: "Package Theft Investigation", + }), + ).toBeHidden(); + await expect( + frigateApp.page.getByText("Backyard - Car Detection"), + ).toBeHidden(); + await expect( + frigateApp.page.getByText("Front Door - Person Alert"), + ).toBeVisible(); + }); +}); + +test.describe("Export Page - Empty State @high", () => { + test("renders the empty state when there are no exports or cases", async ({ + frigateApp, + }) => { + await frigateApp.page.route("**/api/export**", (route) => + route.fulfill({ json: [] }), + ); + await frigateApp.page.route("**/api/exports**", (route) => + route.fulfill({ json: [] }), + ); + await frigateApp.page.route("**/api/cases", (route) => + route.fulfill({ json: [] }), + ); + await frigateApp.page.route("**/api/cases**", (route) => + route.fulfill({ json: [] }), + ); + + await frigateApp.goto("/export"); + + await expect(frigateApp.page.getByText("No exports found")).toBeVisible(); + }); +}); + +test.describe("Export Page - Mobile @high @mobile", () => { + test("mobile can open an export preview dialog", async ({ frigateApp }) => { + test.skip(!frigateApp.isMobile, "Mobile-only assertion"); + + await frigateApp.goto("/export"); + + await frigateApp.page + .getByText("Front Door - Person Alert") + .first() + .click(); + + const dialog = frigateApp.page + .getByRole("dialog") + .filter({ hasText: "Front Door - Person Alert" }); + await expect(dialog).toBeVisible(); + await expect(dialog.locator("video")).toBeVisible(); + }); +}); + +test.describe("Multi-Review Export @high", () => { + // Two alert reviews close enough to "now" to fall within the + // default last-24-hours review window. Using numeric timestamps + // because the TS ReviewSegment type expects numbers even though + // the backend pydantic model serializes datetime as ISO strings — + // the app reads these as numbers for display math. + const now = Date.now() / 1000; + const mockReviews = [ + { + id: "mex-review-001", + camera: "front_door", + start_time: now - 600, + end_time: now - 580, + has_been_reviewed: false, + severity: "alert", + thumb_path: "/clips/front_door/mex-review-001-thumb.jpg", + data: { + audio: [], + detections: ["person-001"], + objects: ["person"], + sub_labels: [], + significant_motion_areas: [], + zones: ["front_yard"], + }, + }, + { + id: "mex-review-002", + camera: "backyard", + start_time: now - 1200, + end_time: now - 1170, + has_been_reviewed: false, + severity: "alert", + thumb_path: "/clips/backyard/mex-review-002-thumb.jpg", + data: { + audio: [], + detections: ["car-002"], + objects: ["car"], + sub_labels: [], + significant_motion_areas: [], + zones: ["driveway"], + }, + }, + ]; + + // 51 alert reviews, all front_door, spaced 5 minutes apart. Used by the + // over-limit test to trigger Ctrl+A select-all and verify the Export + // button is hidden at 51 selected. + const oversizedReviews = Array.from({ length: 51 }, (_, i) => ({ + id: `mex-oversized-${i.toString().padStart(3, "0")}`, + camera: "front_door", + start_time: now - 60 * 60 - i * 300, + end_time: now - 60 * 60 - i * 300 + 20, + has_been_reviewed: false, + severity: "alert", + thumb_path: `/clips/front_door/mex-oversized-${i}-thumb.jpg`, + data: { + audio: [], + detections: [`person-${i}`], + objects: ["person"], + sub_labels: [], + significant_motion_areas: [], + zones: ["front_yard"], + }, + })); + + const mockSummary = { + last24Hours: { + reviewed_alert: 0, + reviewed_detection: 0, + total_alert: 2, + total_detection: 0, + }, + }; + + async function routeReviews( + page: import("@playwright/test").Page, + reviews: unknown[], + ) { + // Intercept the actual `/api/review` endpoint (singular — the + // default api-mocker only registers `/api/reviews**` (plural) + // which does not match the real request URL). + await page.route(/\/api\/review(\?|$)/, (route) => + route.fulfill({ json: reviews }), + ); + await page.route(/\/api\/review\/summary/, (route) => + route.fulfill({ json: mockSummary }), + ); + } + + test.beforeEach(async ({ frigateApp }) => { + await routeReviews(frigateApp.page, mockReviews); + // Empty cases list by default so the dialog defaults to "new case". + // Individual tests override this to populate existing cases. + await frigateApp.page.route("**/api/cases", (route) => + route.fulfill({ json: [] }), + ); + }); + + async function selectTwoReviews(frigateApp: { + page: import("@playwright/test").Page; + }) { + // Every review card has className `review-item` on its wrapper + // (see EventView.tsx). Cards also have data-start attributes that + // we can key off if needed. + const reviewItems = frigateApp.page.locator(".review-item"); + await reviewItems.first().waitFor({ state: "visible", timeout: 10_000 }); + + // Meta-click the first two items to enter multi-select mode. + // PreviewThumbnailPlayer reads e.metaKey to decide multi-select. + await reviewItems.nth(0).click({ modifiers: ["Meta"] }); + await reviewItems.nth(1).click(); + } + + test("selecting two reviews reveals the export button", async ({ + frigateApp, + }) => { + test.skip(frigateApp.isMobile, "Desktop multi-select flow"); + + await frigateApp.goto("/review"); + + await selectTwoReviews(frigateApp); + + // Action group replaces the filter bar once items are selected + await expect(frigateApp.page.getByText(/2.*selected/i)).toBeVisible({ + timeout: 5_000, + }); + + const exportButton = frigateApp.page.getByRole("button", { + name: /export/i, + }); + await expect(exportButton).toBeVisible(); + }); + + test("clicking export opens the multi-review dialog with correct title", async ({ + frigateApp, + }) => { + test.skip(frigateApp.isMobile, "Desktop multi-select flow"); + + await frigateApp.goto("/review"); + + await selectTwoReviews(frigateApp); + + await frigateApp.page + .getByRole("button", { name: /export/i }) + .first() + .click(); + + const dialog = frigateApp.page + .getByRole("dialog") + .filter({ hasText: /Export 2 reviews/i }); + await expect(dialog).toBeVisible({ timeout: 5_000 }); + // The dialog uses a Select trigger for case selection (admins). The + // default "None" value is shown on the trigger. + await expect(dialog.locator("button[role='combobox']")).toBeVisible(); + await expect(dialog.getByText(/None/)).toBeVisible(); + }); + + test("starting an export posts the expected payload and navigates to the case", async ({ + frigateApp, + }) => { + test.skip(frigateApp.isMobile, "Desktop multi-select flow"); + + let capturedPayload: unknown = null; + await frigateApp.page.route("**/api/exports/batch", async (route) => { + capturedPayload = route.request().postDataJSON(); + await route.fulfill({ + status: 202, + json: { + export_case_id: "new-case-xyz", + export_ids: ["front_door_a", "backyard_b"], + results: [ + { + camera: "front_door", + export_id: "front_door_a", + success: true, + status: "queued", + error: null, + item_index: 0, + }, + { + camera: "backyard", + export_id: "backyard_b", + success: true, + status: "queued", + error: null, + item_index: 1, + }, + ], + }, + }); + }); + + await frigateApp.goto("/review"); + await selectTwoReviews(frigateApp); + await frigateApp.page + .getByRole("button", { name: /export/i }) + .first() + .click(); + + const dialog = frigateApp.page + .getByRole("dialog") + .filter({ hasText: /Export 2 reviews/i }); + await expect(dialog).toBeVisible({ timeout: 5_000 }); + + // Select "Create new case" from the case dropdown (default is "None") + await dialog.locator("button[role='combobox']").click(); + await frigateApp.page + .getByRole("option", { name: /Create new case/i }) + .click(); + + const nameInput = dialog.locator("input").first(); + await nameInput.fill("E2E Incident"); + + await dialog.getByRole("button", { name: /export 2 reviews/i }).click(); + + // Wait for the POST to fire + await expect.poll(() => capturedPayload, { timeout: 5_000 }).not.toBeNull(); + + const payload = capturedPayload as { + items: Array<{ + camera: string; + start_time: number; + end_time: number; + image_path?: string; + client_item_id?: string; + }>; + new_case_name?: string; + export_case_id?: string; + }; + expect(payload.items).toHaveLength(2); + expect(payload.new_case_name).toBe("E2E Incident"); + // When creating a new case, we must NOT also send export_case_id — + // the two fields are mutually exclusive on the backend. + expect(payload.export_case_id).toBeUndefined(); + expect(payload.items.map((i) => i.camera).sort()).toEqual([ + "backyard", + "front_door", + ]); + // Each item must preserve REVIEW_PADDING (4s) on the edges — + // i.e. the padded window is 8s longer than the original review. + // The mock reviews above have 20s and 30s raw durations, so the + // expected padded durations are 28s and 38s. + const paddedDurations = payload.items + .map((i) => i.end_time - i.start_time) + .sort((a, b) => a - b); + expect(paddedDurations).toEqual([28, 38]); + // Thumbnails should be passed through per item + for (const item of payload.items) { + expect(item.image_path).toMatch(/mex-review-\d+-thumb\.jpg$/); } - await expect(frigateApp.page.locator("#pageRoot")).toBeVisible(); - }); -}); + expect(payload.items.map((item) => item.client_item_id)).toEqual([ + "mex-review-001", + "mex-review-002", + ]); -test.describe("Export Page - Controls @high", () => { - test("export page filter controls are present", async ({ frigateApp }) => { - await frigateApp.goto("/export"); - await frigateApp.page.waitForTimeout(1000); - const buttons = frigateApp.page.locator("#pageRoot button"); - const count = await buttons.count(); - expect(count).toBeGreaterThan(0); + await expect(frigateApp.page).toHaveURL(/caseId=new-case-xyz/, { + timeout: 5_000, + }); + }); + + test("mobile opens a drawer (not a dialog) for the multi-review export flow", async ({ + frigateApp, + }) => { + test.skip(!frigateApp.isMobile, "Mobile-only Drawer assertion"); + + await frigateApp.goto("/review"); + await selectTwoReviews(frigateApp); + + await frigateApp.page + .getByRole("button", { name: /export/i }) + .first() + .click(); + + // On mobile the component renders a shadcn Drawer, which uses + // role="dialog" but sets data-vaul-drawer. Desktop renders a + // shadcn Dialog with role="dialog" but no data-vaul-drawer. + // The title and submit button both contain "Export 2 reviews", so + // assert each element distinctly: the title is a heading and the + // submit button has role="button". + const drawer = frigateApp.page.locator("[data-vaul-drawer]"); + await expect(drawer).toBeVisible({ timeout: 5_000 }); + await expect( + drawer.getByRole("heading", { name: /Export 2 reviews/i }), + ).toBeVisible(); + await expect( + drawer.getByRole("button", { name: /export 2 reviews/i }), + ).toBeVisible(); + }); + + test("hides export button when more than 50 reviews are selected", async ({ + frigateApp, + }) => { + test.skip(frigateApp.isMobile, "Desktop select-all keyboard flow"); + + // Override the default 2-review mock with 51 reviews before + // navigation. Playwright matches routes last-registered-first so + // this takes precedence over the beforeEach. + await routeReviews(frigateApp.page, oversizedReviews); + + await frigateApp.goto("/review"); + + // Wait for any review item to render before firing the shortcut + await frigateApp.page + .locator(".review-item") + .first() + .waitFor({ state: "visible", timeout: 10_000 }); + + // Ctrl+A triggers onSelectAllReviews (see EventView.tsx useKeyboardListener) + await frigateApp.page.keyboard.press("Control+a"); + + // The action group should show "51 selected" but no Export button. + // Mark-as-reviewed is still there so the action bar is rendered. + // Scope the "Mark as reviewed" lookup to its exact aria-label because + // the page can render other "mark as reviewed" controls elsewhere + // (e.g. on individual cards) that would trip strict-mode matching. + await expect(frigateApp.page.getByText(/51.*selected/i)).toBeVisible({ + timeout: 5_000, + }); + await expect( + frigateApp.page.getByRole("button", { name: "Mark as reviewed" }), + ).toBeVisible(); + await expect( + frigateApp.page.getByRole("button", { name: /^export$/i }), + ).toHaveCount(0); + }); + + test("attaching to an existing case sends export_case_id without new_case_name", async ({ + frigateApp, + }) => { + test.skip(frigateApp.isMobile, "Desktop multi-select flow"); + + // Seed one existing case so the dialog can offer the "existing" branch. + // The fixture mocks the user as admin (adminProfile()), so useIsAdmin() + // is true and the dialog renders the "Existing case" radio. + await frigateApp.page.route("**/api/cases", (route) => + route.fulfill({ + json: [ + { + id: "existing-case-abc", + name: "Incident #42", + description: "", + created_at: now - 3600, + updated_at: now - 3600, + }, + ], + }), + ); + + let capturedPayload: unknown = null; + await frigateApp.page.route("**/api/exports/batch", async (route) => { + capturedPayload = route.request().postDataJSON(); + await route.fulfill({ + status: 202, + json: { + export_case_id: "existing-case-abc", + export_ids: ["front_door_a", "backyard_b"], + results: [ + { + camera: "front_door", + export_id: "front_door_a", + success: true, + status: "queued", + error: null, + item_index: 0, + }, + { + camera: "backyard", + export_id: "backyard_b", + success: true, + status: "queued", + error: null, + item_index: 1, + }, + ], + }, + }); + }); + + await frigateApp.goto("/review"); + await selectTwoReviews(frigateApp); + + await frigateApp.page + .getByRole("button", { name: /export/i }) + .first() + .click(); + + const dialog = frigateApp.page + .getByRole("dialog") + .filter({ hasText: /Export 2 reviews/i }); + await expect(dialog).toBeVisible({ timeout: 5_000 }); + + // Open the Case Select dropdown and pick the seeded case directly. + // The dialog now uses a single Select listing existing cases above + // the "Create new case" option — no radio toggle needed. + const selectTrigger = dialog.locator("button[role='combobox']").first(); + await selectTrigger.waitFor({ state: "visible", timeout: 5_000 }); + await selectTrigger.click(); + + // The dropdown portal renders outside the dialog + await frigateApp.page.getByRole("option", { name: /Incident #42/ }).click(); + + await dialog.getByRole("button", { name: /export 2 reviews/i }).click(); + + await expect.poll(() => capturedPayload, { timeout: 5_000 }).not.toBeNull(); + + const payload = capturedPayload as { + items: unknown[]; + new_case_name?: string; + new_case_description?: string; + export_case_id?: string; + }; + expect(payload.export_case_id).toBe("existing-case-abc"); + expect(payload.new_case_name).toBeUndefined(); + expect(payload.new_case_description).toBeUndefined(); + expect(payload.items).toHaveLength(2); + + // Navigate should hit /export. useSearchEffect consumes the caseId + // query param and strips it once the case is found in the cases list, + // so we assert on the path, not the query string. + await expect(frigateApp.page).toHaveURL(/\/export(\?|$)/, { + timeout: 5_000, + }); }); }); diff --git a/web/package.json b/web/package.json index e3fc4af9a..0ece2d6fe 100644 --- a/web/package.json +++ b/web/package.json @@ -7,7 +7,8 @@ "dev": "vite --host", "postinstall": "patch-package", "build": "tsc && vite build --base=/BASE_PATH/", - "lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore .", + "lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore . && npm run e2e:lint", + "e2e:lint": "node e2e/scripts/lint-specs.mjs", "lint:fix": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore --fix .", "preview": "vite preview", "prettier:write": "prettier -u -w --ignore-path .gitignore \"*.{ts,tsx,js,jsx,css,html}\"", diff --git a/web/public/locales/en/components/dialog.json b/web/public/locales/en/components/dialog.json index 0231cfe41..3630d68e0 100644 --- a/web/public/locales/en/components/dialog.json +++ b/web/public/locales/en/components/dialog.json @@ -50,24 +50,77 @@ "placeholder": "Name the Export" }, "case": { + "newCaseOption": "Create new case", + "newCaseNamePlaceholder": "New case name", + "newCaseDescriptionPlaceholder": "Case description", "label": "Case", + "nonAdminHelp": "A new case will be created for these exports.", "placeholder": "Select a case" }, "select": "Select", "export": "Export", + "queueing": "Queueing Export...", "selectOrExport": "Select or Export", + "tabs": { + "export": "Single Camera", + "multiCamera": "Multi-Camera" + }, + "multiCamera": { + "timeRange": "Time range", + "selectFromTimeline": "Select from Timeline", + "cameraSelection": "Cameras", + "cameraSelectionHelp": "Cameras with tracked objects in this time range are pre-selected", + "checkingActivity": "Checking camera activity...", + "noCameras": "No cameras available", + "detectionCount_one": "1 tracked object", + "detectionCount_other": "{{count}} tracked objects", + "nameLabel": "Export name", + "namePlaceholder": "Optional base name for these exports", + "queueingButton": "Queueing Exports...", + "exportButton_one": "Export 1 Camera", + "exportButton_other": "Export {{count}} Cameras" + }, + "multi": { + "title_one": "Export 1 review", + "title_other": "Export {{count}} reviews", + "description": "Export each selected review. All exports will be grouped under a single case.", + "descriptionNoCase": "Export each selected review.", + "caseNamePlaceholder": "Review export - {{date}}", + "exportButton_one": "Export 1 review", + "exportButton_other": "Export {{count}} reviews", + "exportingButton": "Exporting...", + "toast": { + "started_one": "Started 1 export. Opening the case now.", + "started_other": "Started {{count}} exports. Opening the case now.", + "startedNoCase_one": "Started 1 export.", + "startedNoCase_other": "Started {{count}} exports.", + "partial": "Started {{successful}} of {{total}} exports. Failed: {{failedItems}}", + "failed": "Failed to start {{total}} exports. Failed: {{failedItems}}" + } + }, "toast": { "success": "Successfully started export. View the file in the exports page.", + "queued": "Export queued. View progress in the exports page.", "view": "View", + "batchSuccess_one": "Started 1 export. Opening the case now.", + "batchSuccess_other": "Started {{count}} exports. Opening the case now.", + "batchPartial": "Started {{successful}} of {{total}} exports. Failed cameras: {{failedCameras}}", + "batchFailed": "Failed to start {{total}} exports. Failed cameras: {{failedCameras}}", + "batchQueuedSuccess_one": "Queued 1 export. Opening the case now.", + "batchQueuedSuccess_other": "Queued {{count}} exports. Opening the case now.", + "batchQueuedPartial": "Queued {{successful}} of {{total}} exports. Failed cameras: {{failedCameras}}", + "batchQueueFailed": "Failed to queue {{total}} exports. Failed cameras: {{failedCameras}}", "error": { - "failed": "Failed to start export: {{error}}", + "failed": "Failed to queue export: {{error}}", "endTimeMustAfterStartTime": "End time must be after start time", "noVaildTimeSelected": "No valid time range selected" } }, "fromTimeline": { "saveExport": "Save Export", - "previewExport": "Preview Export" + "queueingExport": "Queueing Export...", + "previewExport": "Preview Export", + "useThisRange": "Use This Range" } }, "streaming": { diff --git a/web/public/locales/en/views/chat.json b/web/public/locales/en/views/chat.json index ca0520d88..6d78dc71f 100644 --- a/web/public/locales/en/views/chat.json +++ b/web/public/locales/en/views/chat.json @@ -12,6 +12,23 @@ "result": "Result", "arguments": "Arguments:", "response": "Response:", + "attachment_chip_label": "{{label}} on {{camera}}", + "attachment_chip_remove": "Remove attachment", + "open_in_explore": "Open in Explore", + "attach_event_aria": "Attach event {{eventId}}", + "attachment_picker_paste_label": "Or paste event ID", + "attachment_picker_attach": "Attach", + "attachment_picker_placeholder": "Attach an event", + "quick_reply_find_similar": "Find similar sightings", + "quick_reply_tell_me_more": "Tell me more about this", + "quick_reply_when_else": "When else was it seen?", + "quick_reply_find_similar_text": "Find similar sightings to this.", + "quick_reply_tell_me_more_text": "Tell me more about this one.", + "quick_reply_when_else_text": "When else was this seen?", + "anchor": "Reference", + "similarity_score": "Similarity", + "no_similar_objects_found": "No similar objects found.", + "semantic_search_required": "Semantic search must be enabled to find similar objects.", "send": "Send", "suggested_requests": "Try asking:", "starting_requests": { diff --git a/web/public/locales/en/views/exports.json b/web/public/locales/en/views/exports.json index 46cd06ead..5e64952d8 100644 --- a/web/public/locales/en/views/exports.json +++ b/web/public/locales/en/views/exports.json @@ -20,14 +20,30 @@ "downloadVideo": "Download video", "editName": "Edit name", "deleteExport": "Delete export", - "assignToCase": "Add to case" + "assignToCase": "Add to case", + "removeFromCase": "Remove from case" + }, + "toolbar": { + "newCase": "New Case", + "addExport": "Add Export", + "editCase": "Edit Case", + "deleteCase": "Delete Case" }, "toast": { "error": { "renameExportFailed": "Failed to rename export: {{errorMessage}}", - "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}" + "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}", + "caseSaveFailed": "Failed to save case: {{errorMessage}}", + "caseDeleteFailed": "Failed to delete case: {{errorMessage}}" } }, + "deleteCase": { + "label": "Delete Case", + "desc": "Are you sure you want to delete {{caseName}}?", + "descKeepExports": "Exports will remain available as uncategorized exports.", + "descDeleteExports": "All exports in this case will be permanently deleted.", + "deleteExports": "Also delete exports" + }, "caseDialog": { "title": "Add to case", "description": "Choose an existing case or create a new one.", @@ -35,5 +51,73 @@ "newCaseOption": "Create new case", "nameLabel": "Case name", "descriptionLabel": "Description" + }, + "caseCard": { + "emptyCase": "No exports yet" + }, + "jobCard": { + "defaultName": "{{camera}} export", + "queued": "Queued", + "running": "Running" + }, + "caseView": { + "noDescription": "No description", + "createdAt": "Created {{value}}", + "exportCount_one": "1 export", + "exportCount_other": "{{count}} exports", + "cameraCount_one": "1 camera", + "cameraCount_other": "{{count}} cameras", + "showMore": "Show more", + "showLess": "Show less", + "emptyTitle": "This case is empty", + "emptyDescription": "Add existing uncategorized exports to keep the case organized.", + "emptyDescriptionNoExports": "There are no uncategorized exports available to add yet." + }, + "caseEditor": { + "createTitle": "Create Case", + "editTitle": "Edit Case", + "namePlaceholder": "Case name", + "descriptionPlaceholder": "Add notes or context for this case" + }, + "addExportDialog": { + "title": "Add Export to {{caseName}}", + "searchPlaceholder": "Search uncategorized exports", + "empty": "No uncategorized exports match this search.", + "addButton_one": "Add 1 Export", + "addButton_other": "Add {{count}} Exports", + "adding": "Adding..." + }, + "selected_one": "{{count}} selected", + "selected_other": "{{count}} selected", + "bulkActions": { + "addToCase": "Add to Case", + "moveToCase": "Move to Case", + "removeFromCase": "Remove from Case", + "delete": "Delete", + "deleteNow": "Delete Now" + }, + "bulkDelete": { + "title": "Delete Exports", + "desc_one": "Are you sure you want to delete {{count}} export?", + "desc_other": "Are you sure you want to delete {{count}} exports?" + }, + "bulkRemoveFromCase": { + "title": "Remove from Case", + "desc_one": "Remove {{count}} export from this case?", + "desc_other": "Remove {{count}} exports from this case?", + "descKeepExports": "Exports will be moved to uncategorized.", + "descDeleteExports": "Exports will be permanently deleted.", + "deleteExports": "Delete exports instead" + }, + "bulkToast": { + "success": { + "delete": "Successfully deleted exports", + "reassign": "Successfully updated case assignment", + "remove": "Successfully removed exports from case" + }, + "error": { + "deleteFailed": "Failed to delete exports: {{errorMessage}}", + "reassignFailed": "Failed to update case assignment: {{errorMessage}}" + } } } diff --git a/web/src/components/Statusbar.tsx b/web/src/components/Statusbar.tsx index 18a0d9ee1..4e7c4053c 100644 --- a/web/src/components/Statusbar.tsx +++ b/web/src/components/Statusbar.tsx @@ -7,6 +7,7 @@ import useStats, { useAutoFrigateStats } from "@/hooks/use-stats"; import { cn } from "@/lib/utils"; import type { ProfilesApiResponse } from "@/types/profile"; import { getProfileColor } from "@/utils/profileColors"; +import { useIsAdmin } from "@/hooks/use-is-admin"; import { useContext, useEffect, useMemo } from "react"; import { useTranslation } from "react-i18next"; import useSWR from "swr"; @@ -18,6 +19,7 @@ import { Link } from "react-router-dom"; export default function Statusbar() { const { t } = useTranslation(["views/system"]); + const isAdmin = useIsAdmin(); const { messages, addMessage, clearMessages } = useContext( StatusBarMessagesContext, @@ -154,9 +156,23 @@ export default function Statusbar() { ); })} - {activeProfile && ( - -
+ {activeProfile && + (isAdmin ? ( + +
+ + + {activeProfile.friendlyName} + +
+ + ) : ( +
- - )} + ))}
{Object.entries(messages).length === 0 ? ( diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index c8d9c4c65..724179128 100644 --- a/web/src/components/card/ExportCard.tsx +++ b/web/src/components/card/ExportCard.tsx @@ -1,6 +1,6 @@ import ActivityIndicator from "../indicators/activity-indicator"; import { Button } from "../ui/button"; -import { useCallback, useMemo, useState } from "react"; +import { useCallback, useMemo, useRef, useState } from "react"; import { isMobile } from "react-device-detect"; import { FiMoreVertical } from "react-icons/fi"; import { Skeleton } from "../ui/skeleton"; @@ -13,7 +13,7 @@ import { } from "../ui/dialog"; import { Input } from "../ui/input"; import useKeyboardListener from "@/hooks/use-keyboard-listener"; -import { DeleteClipType, Export, ExportCase } from "@/types/export"; +import { DeleteClipType, Export, ExportCase, ExportJob } from "@/types/export"; import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { shareOrCopy } from "@/utils/browserUtil"; @@ -27,7 +27,10 @@ import { DropdownMenuItem, DropdownMenuTrigger, } from "../ui/dropdown-menu"; -import { FaFolder } from "react-icons/fa"; +import { FaFolder, FaVideo } from "react-icons/fa"; +import { HiSquare2Stack } from "react-icons/hi2"; +import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name"; +import useContextMenu from "@/hooks/use-contextmenu"; type CaseCardProps = { className: string; @@ -41,10 +44,15 @@ export function CaseCard({ exports, onSelect, }: CaseCardProps) { + const { t } = useTranslation(["views/exports"]); const firstExport = useMemo( () => exports.find((exp) => exp.thumb_path && exp.thumb_path.length > 0), [exports], ); + const cameraCount = useMemo( + () => new Set(exports.map((exp) => exp.camera)).size, + [exports], + ); return (
)} + {!firstExport && ( +
+ )}
-
- -
{exportCase.name}
+
+
+ +
{exports.length}
+
+
+ +
{cameraCount}
+
+
+
+
+ +
{exportCase.name}
+
+ {exports.length === 0 && ( +
+ {t("caseCard.emptyCase")} +
+ )}
); @@ -73,18 +101,26 @@ export function CaseCard({ type ExportCardProps = { className: string; exportedRecording: Export; + isSelected?: boolean; + selectionMode?: boolean; onSelect: (selected: Export) => void; + onContextSelect?: (selected: Export) => void; onRename: (original: string, update: string) => void; onDelete: ({ file, exportName }: DeleteClipType) => void; onAssignToCase?: (selected: Export) => void; + onRemoveFromCase?: (selected: Export) => void; }; export function ExportCard({ className, exportedRecording, + isSelected, + selectionMode, onSelect, + onContextSelect, onRename, onDelete, onAssignToCase, + onRemoveFromCase, }: ExportCardProps) { const { t } = useTranslation(["views/exports"]); const isAdmin = useIsAdmin(); @@ -92,6 +128,15 @@ export function ExportCard({ exportedRecording.thumb_path.length > 0, ); + // selection + + const cardRef = useRef(null); + useContextMenu(cardRef, () => { + if (!exportedRecording.in_progress && onContextSelect) { + onContextSelect(exportedRecording); + } + }); + // editing name const [editName, setEditName] = useState<{ @@ -180,13 +225,18 @@ export function ExportCard({
{ + onClick={(e) => { if (!exportedRecording.in_progress) { - onSelect(exportedRecording); + if ((selectionMode || e.ctrlKey || e.metaKey) && onContextSelect) { + onContextSelect(exportedRecording); + } else { + onSelect(exportedRecording); + } } }} > @@ -205,7 +255,7 @@ export function ExportCard({ )} )} - {!exportedRecording.in_progress && ( + {!exportedRecording.in_progress && !selectionMode && (
@@ -254,6 +304,18 @@ export function ExportCard({ {t("tooltip.assignToCase")} )} + {isAdmin && onRemoveFromCase && ( + { + e.stopPropagation(); + onRemoveFromCase(exportedRecording); + }} + > + {t("tooltip.removeFromCase")} + + )} {isAdmin && ( )} -
- {exportedRecording.name.replaceAll("_", " ")} +
+
+
+ {exportedRecording.name.replaceAll("_", " ")} +
); } + +type ActiveExportJobCardProps = { + className?: string; + job: ExportJob; +}; + +export function ActiveExportJobCard({ + className = "", + job, +}: ActiveExportJobCardProps) { + const { t } = useTranslation(["views/exports", "common"]); + const cameraName = useCameraFriendlyName(job.camera); + const displayName = useMemo(() => { + if (job.name && job.name.length > 0) { + return job.name.replaceAll("_", " "); + } + + return t("jobCard.defaultName", { + camera: cameraName, + }); + }, [cameraName, job.name, t]); + const statusLabel = + job.status === "queued" ? t("jobCard.queued") : t("jobCard.running"); + + return ( +
+
+ {statusLabel} +
+
+ +
{displayName}
+
+
+ ); +} diff --git a/web/src/components/card/ReviewCard.tsx b/web/src/components/card/ReviewCard.tsx index 0ae8d376d..6fb72a6fa 100644 --- a/web/src/components/card/ReviewCard.tsx +++ b/web/src/components/card/ReviewCard.tsx @@ -81,7 +81,7 @@ export default function ReviewCard({ axios .post( - `export/${event.camera}/start/${event.start_time + REVIEW_PADDING}/end/${endTime}`, + `export/${event.camera}/start/${event.start_time - REVIEW_PADDING}/end/${endTime}`, { playback: "realtime" }, ) .then((response) => { diff --git a/web/src/components/chat/ChatAttachmentChip.tsx b/web/src/components/chat/ChatAttachmentChip.tsx new file mode 100644 index 000000000..5894efaa7 --- /dev/null +++ b/web/src/components/chat/ChatAttachmentChip.tsx @@ -0,0 +1,111 @@ +import { useApiHost } from "@/api"; +import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name"; +import { useTranslation } from "react-i18next"; +import useSWR from "swr"; +import { LuX, LuExternalLink } from "react-icons/lu"; +import { Button } from "@/components/ui/button"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { cn } from "@/lib/utils"; +import { getTranslatedLabel } from "@/utils/i18n"; + +type ChatAttachmentChipProps = { + eventId: string; + mode: "composer" | "bubble"; + onRemove?: () => void; +}; + +/** + * Small horizontal chip rendering an event as an "attachment": a thumbnail, + * a friendly label like "Person on driveway", an optional remove X (composer + * mode), and an external-link icon that opens the event in Explore. + */ +export function ChatAttachmentChip({ + eventId, + mode, + onRemove, +}: ChatAttachmentChipProps) { + const apiHost = useApiHost(); + const { t } = useTranslation(["views/chat"]); + + const { data: eventData } = useSWR<{ label: string; camera: string }[]>( + `event_ids?ids=${eventId}`, + ); + const evt = eventData?.[0]; + const cameraName = useCameraFriendlyName(evt?.camera); + const displayLabel = evt + ? t("attachment_chip_label", { + label: getTranslatedLabel(evt.label), + camera: cameraName, + }) + : eventId; + + return ( +
+
+ { + (e.currentTarget as HTMLImageElement).style.visibility = "hidden"; + }} + /> +
+ {evt ? ( + + {displayLabel} + + ) : ( + + )} + + + e.stopPropagation()} + aria-label={t("open_in_explore")} + > + + + + {t("open_in_explore")} + + {mode === "composer" && onRemove && ( + + )} +
+ ); +} diff --git a/web/src/components/chat/ChatEventThumbnailsRow.tsx b/web/src/components/chat/ChatEventThumbnailsRow.tsx index bf2c5e88f..a12153e89 100644 --- a/web/src/components/chat/ChatEventThumbnailsRow.tsx +++ b/web/src/components/chat/ChatEventThumbnailsRow.tsx @@ -1,42 +1,97 @@ import { useApiHost } from "@/api"; +import { useTranslation } from "react-i18next"; +import { LuExternalLink } from "react-icons/lu"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { cn } from "@/lib/utils"; + +type ChatEvent = { id: string; score?: number }; type ChatEventThumbnailsRowProps = { - events: { id: string }[]; + events: ChatEvent[]; + anchor?: { id: string } | null; + onAttach?: (eventId: string) => void; }; /** - * Horizontal scroll row of event thumbnail images for chat (e.g. after search_objects). - * Renders nothing when events is empty. + * Horizontal scroll row of event thumbnail images for chat. + * Optionally renders an anchor thumbnail with a "reference" badge above the + * results, and per-event similarity scores when provided. + * Clicking a thumbnail calls onAttach; a small external-link overlay opens + * the event in Explore. + * Renders nothing when there is nothing to show. */ export function ChatEventThumbnailsRow({ events, + anchor = null, + onAttach, }: ChatEventThumbnailsRowProps) { const apiHost = useApiHost(); + const { t } = useTranslation(["views/chat"]); - if (events.length === 0) return null; + if (events.length === 0 && !anchor) return null; + + const renderThumb = (event: ChatEvent, isAnchor = false) => ( +
+ + + + e.stopPropagation()} + className="absolute right-1 top-1 flex size-6 items-center justify-center rounded bg-black/60 text-white hover:bg-black/80" + aria-label={t("open_in_explore")} + > + + + + {t("open_in_explore")} + + {isAnchor && ( + + {t("anchor")} + + )} +
+ ); return ( -
-
-
- {events.map((event) => ( - - - - ))} +
+ {anchor && ( +
+
{renderThumb(anchor, true)}
-
+ )} + {events.length > 0 && ( +
+
+ {events.map((event) => renderThumb(event))} +
+
+ )}
); } diff --git a/web/src/components/chat/ChatMessage.tsx b/web/src/components/chat/ChatMessage.tsx index b21fae435..c5f92b5f4 100644 --- a/web/src/components/chat/ChatMessage.tsx +++ b/web/src/components/chat/ChatMessage.tsx @@ -15,6 +15,8 @@ import { TooltipTrigger, } from "@/components/ui/tooltip"; import { cn } from "@/lib/utils"; +import { ChatAttachmentChip } from "@/components/chat/ChatAttachmentChip"; +import { parseAttachedEvent } from "@/utils/chatUtil"; type MessageBubbleProps = { role: "user" | "assistant"; @@ -126,6 +128,10 @@ export function MessageBubble({ ); } + const { eventId: attachedEventId, body: displayContent } = isUser + ? parseAttachedEvent(content) + : { eventId: null, body: content }; + return (
{isUser ? ( - content +
+ {attachedEventId && ( + + )} +
{displayContent}
+
) : ( - <> +
*:last-child]:inline", + !isComplete && + "after:ml-0.5 after:inline-block after:h-4 after:w-2 after:animate-cursor-blink after:rounded-sm after:bg-foreground after:align-middle after:content-['']", + )} + > {content} - {!isComplete && ( - - )} - +
)}
diff --git a/web/src/components/chat/ChatPaperclipButton.tsx b/web/src/components/chat/ChatPaperclipButton.tsx new file mode 100644 index 000000000..fac39f3c6 --- /dev/null +++ b/web/src/components/chat/ChatPaperclipButton.tsx @@ -0,0 +1,114 @@ +import { useState } from "react"; +import { useTranslation } from "react-i18next"; +import { LuPaperclip } from "react-icons/lu"; +import { useApiHost } from "@/api"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; + +const EVENT_ID_RE = /^[A-Za-z0-9._-]+$/; + +type ChatPaperclipButtonProps = { + recentEventIds: string[]; + onAttach: (eventId: string) => void; + disabled?: boolean; +}; + +/** + * Paperclip button with a popover for picking an event to attach. + * Shows a grid of recent thumbnails (from the latest assistant message) and a + * "paste event ID" fallback input. + */ +export function ChatPaperclipButton({ + recentEventIds, + onAttach, + disabled = false, +}: ChatPaperclipButtonProps) { + const apiHost = useApiHost(); + const { t } = useTranslation(["views/chat"]); + const [open, setOpen] = useState(false); + const [pasteId, setPasteId] = useState(""); + + const handlePickThumbnail = (eventId: string) => { + onAttach(eventId); + setOpen(false); + setPasteId(""); + }; + + const handlePasteSubmit = () => { + const trimmed = pasteId.trim(); + if (!trimmed || !EVENT_ID_RE.test(trimmed)) return; + onAttach(trimmed); + setOpen(false); + setPasteId(""); + }; + + const handlePasteKeyDown = (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + e.preventDefault(); + handlePasteSubmit(); + } + }; + + return ( + + + + + +
+ {recentEventIds.length > 0 && ( +
+ {recentEventIds.slice(0, 8).map((id) => ( + + ))} +
+ )} +
+ setPasteId(e.target.value)} + onKeyDown={handlePasteKeyDown} + className="h-8 text-xs" + /> + +
+
+
+
+ ); +} diff --git a/web/src/components/chat/ChatQuickReplies.tsx b/web/src/components/chat/ChatQuickReplies.tsx new file mode 100644 index 000000000..2499b305c --- /dev/null +++ b/web/src/components/chat/ChatQuickReplies.tsx @@ -0,0 +1,49 @@ +import { useTranslation } from "react-i18next"; +import { Button } from "@/components/ui/button"; + +type QuickReply = { labelKey: string; textKey: string }; + +const REPLIES: QuickReply[] = [ + { + labelKey: "quick_reply_find_similar", + textKey: "quick_reply_find_similar_text", + }, + { + labelKey: "quick_reply_tell_me_more", + textKey: "quick_reply_tell_me_more_text", + }, + { labelKey: "quick_reply_when_else", textKey: "quick_reply_when_else_text" }, +]; + +type ChatQuickRepliesProps = { + onSend: (text: string) => void; + disabled?: boolean; +}; + +/** + * Row of pill buttons shown in the composer while an attachment is pending. + * Clicking a pill immediately calls onSend with the canned text. + */ +export function ChatQuickReplies({ + onSend, + disabled = false, +}: ChatQuickRepliesProps) { + const { t } = useTranslation(["views/chat"]); + + return ( +
+ {REPLIES.map((reply) => ( + + ))} +
+ ); +} diff --git a/web/src/components/config-form/section-configs/record.ts b/web/src/components/config-form/section-configs/record.ts index 89b1232bf..1d47454d7 100644 --- a/web/src/components/config-form/section-configs/record.ts +++ b/web/src/components/config-form/section-configs/record.ts @@ -56,6 +56,11 @@ const record: SectionConfigOverrides = { }, camera: { restartRequired: [], + hiddenFields: [ + "enabled_in_config", + "sync_recordings", + "export.max_concurrent", + ], }, }; diff --git a/web/src/components/filter/CameraGroupSelector.tsx b/web/src/components/filter/CameraGroupSelector.tsx index c50abf692..5c092ac1f 100644 --- a/web/src/components/filter/CameraGroupSelector.tsx +++ b/web/src/components/filter/CameraGroupSelector.tsx @@ -89,6 +89,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) { const { t } = useTranslation(["components/camera"]); const { data: config } = useSWR("config"); const allowedCameras = useAllowedCameras(); + const hasFullCameraAccess = useHasFullCameraAccess(); const isAdmin = useIsAdmin(); // tooltip @@ -125,7 +126,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) { const allGroups = Object.entries(config.camera_groups); // If custom role, filter out groups where user has no accessible cameras - if (!isAdmin) { + if (!hasFullCameraAccess) { return allGroups .filter(([, groupConfig]) => { // Check if user has access to at least one camera in this group @@ -137,7 +138,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) { } return allGroups.sort((a, b) => a[1].order - b[1].order); - }, [config, allowedCameras, isAdmin]); + }, [config, allowedCameras, hasFullCameraAccess]); // add group diff --git a/web/src/components/filter/ExportActionGroup.tsx b/web/src/components/filter/ExportActionGroup.tsx new file mode 100644 index 000000000..92e5f251b --- /dev/null +++ b/web/src/components/filter/ExportActionGroup.tsx @@ -0,0 +1,384 @@ +import { useCallback, useMemo, useState } from "react"; +import axios from "axios"; +import { Button, buttonVariants } from "../ui/button"; +import { isDesktop } from "react-device-detect"; +import { HiTrash } from "react-icons/hi"; +import { LuFolderPlus, LuFolderX } from "react-icons/lu"; +import { Export, ExportCase } from "@/types/export"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "../ui/alert-dialog"; +import { Label } from "../ui/label"; +import { Switch } from "../ui/switch"; +import useKeyboardListener from "@/hooks/use-keyboard-listener"; +import { useTranslation } from "react-i18next"; +import { toast } from "sonner"; +import { useIsAdmin } from "@/hooks/use-is-admin"; +import OptionAndInputDialog from "../overlay/dialog/OptionAndInputDialog"; + +type ExportActionGroupProps = { + selectedExports: Export[]; + setSelectedExports: (exports: Export[]) => void; + context: "uncategorized" | "case"; + cases?: ExportCase[]; + currentCaseId?: string; + mutate: () => void; +}; +export default function ExportActionGroup({ + selectedExports, + setSelectedExports, + context, + cases, + currentCaseId, + mutate, +}: ExportActionGroupProps) { + const { t } = useTranslation(["views/exports", "common"]); + const isAdmin = useIsAdmin(); + + const onClearSelected = useCallback(() => { + setSelectedExports([]); + }, [setSelectedExports]); + + // ── Delete ────────────────────────────────────────────────────── + + const onDelete = useCallback(() => { + const ids = selectedExports.map((e) => e.id); + axios + .post("exports/delete", { ids }) + .then((resp) => { + if (resp.status === 200) { + toast.success(t("bulkToast.success.delete"), { + position: "top-center", + }); + setSelectedExports([]); + mutate(); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("bulkToast.error.deleteFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, [selectedExports, setSelectedExports, mutate, t]); + + const [deleteDialogOpen, setDeleteDialogOpen] = useState(false); + const [bypassDialog, setBypassDialog] = useState(false); + + useKeyboardListener(["Shift"], (_, modifiers) => { + setBypassDialog(modifiers.shift); + return false; + }); + + const handleDelete = useCallback(() => { + if (bypassDialog) { + onDelete(); + } else { + setDeleteDialogOpen(true); + } + }, [bypassDialog, onDelete]); + + // ── Remove from case ──────────────────────────────────────────── + + const [removeDialogOpen, setRemoveDialogOpen] = useState(false); + const [deleteExportsOnRemove, setDeleteExportsOnRemove] = useState(false); + + const handleRemoveFromCase = useCallback(() => { + const ids = selectedExports.map((e) => e.id); + + const request = deleteExportsOnRemove + ? axios.post("exports/delete", { ids }) + : axios.post("exports/reassign", { ids, export_case_id: null }); + + request + .then((resp) => { + if (resp.status === 200) { + toast.success(t("bulkToast.success.remove"), { + position: "top-center", + }); + setSelectedExports([]); + mutate(); + setRemoveDialogOpen(false); + setDeleteExportsOnRemove(false); + } + }) + .catch((error) => { + const errorMessage = + error.response?.data?.message || + error.response?.data?.detail || + "Unknown error"; + toast.error(t("bulkToast.error.reassignFailed", { errorMessage }), { + position: "top-center", + }); + }); + }, [selectedExports, deleteExportsOnRemove, setSelectedExports, mutate, t]); + + // ── Case picker ───────────────────────────────────────────────── + + const [casePickerOpen, setCasePickerOpen] = useState(false); + + const caseOptions = useMemo( + () => [ + ...(cases ?? []) + .filter((c) => c.id !== currentCaseId) + .map((c) => ({ + value: c.id, + label: c.name, + })) + .sort((a, b) => a.label.localeCompare(b.label)), + { + value: "new", + label: t("caseDialog.newCaseOption"), + }, + ], + [cases, currentCaseId, t], + ); + + const handleAssignToCase = useCallback( + async (caseId: string) => { + const ids = selectedExports.map((e) => e.id); + try { + await axios.post("exports/reassign", { + ids, + export_case_id: caseId, + }); + toast.success(t("bulkToast.success.reassign"), { + position: "top-center", + }); + setSelectedExports([]); + mutate(); + } catch (error) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error(t("bulkToast.error.reassignFailed", { errorMessage }), { + position: "top-center", + }); + throw error; + } + }, + [selectedExports, setSelectedExports, mutate, t], + ); + + const handleCreateNewCase = useCallback( + async (name: string, description: string) => { + const ids = selectedExports.map((e) => e.id); + try { + const createResp = await axios.post("cases", { name, description }); + const newCaseId: string | undefined = createResp.data?.id; + + if (newCaseId) { + await axios.post("exports/reassign", { + ids, + export_case_id: newCaseId, + }); + } + + toast.success(t("bulkToast.success.reassign"), { + position: "top-center", + }); + setSelectedExports([]); + mutate(); + } catch (error) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error(t("bulkToast.error.reassignFailed", { errorMessage }), { + position: "top-center", + }); + throw error; + } + }, + [selectedExports, setSelectedExports, mutate, t], + ); + + return ( + <> + {/* Delete confirmation dialog */} + setDeleteDialogOpen(!deleteDialogOpen)} + > + + + {t("bulkDelete.title")} + + + {t("bulkDelete.desc", { count: selectedExports.length })} + + + + {t("button.cancel", { ns: "common" })} + + + {t("button.delete", { ns: "common" })} + + + + + + {/* Remove from case dialog */} + {context === "case" && ( + { + if (!open) { + setRemoveDialogOpen(false); + setDeleteExportsOnRemove(false); + } + }} + > + + + + {t("bulkRemoveFromCase.title")} + + + {t("bulkRemoveFromCase.desc", { + count: selectedExports.length, + })}{" "} + {deleteExportsOnRemove + ? t("bulkRemoveFromCase.descDeleteExports") + : t("bulkRemoveFromCase.descKeepExports")} + + +
+ + +
+ + + {t("button.cancel", { ns: "common" })} + + + {t("button.delete", { ns: "common" })} + + +
+
+ )} + + {/* Case picker dialog */} + + + {/* Action bar */} +
+
+
+ {t("selected", { count: selectedExports.length })} +
+
{"|"}
+
+ {t("button.unselect", { ns: "common" })} +
+
+ {isAdmin && ( +
+ {/* Add to Case / Move to Case */} + + + {/* Remove from Case (case context only) */} + {context === "case" && ( + + )} + + {/* Delete */} + +
+ )} +
+ + ); +} diff --git a/web/src/components/filter/ReviewActionGroup.tsx b/web/src/components/filter/ReviewActionGroup.tsx index 31c5a56f4..389d12104 100644 --- a/web/src/components/filter/ReviewActionGroup.tsx +++ b/web/src/components/filter/ReviewActionGroup.tsx @@ -6,6 +6,7 @@ import { isDesktop } from "react-device-detect"; import { FaCompactDisc } from "react-icons/fa"; import { HiTrash } from "react-icons/hi"; import { ReviewSegment } from "@/types/review"; +import { MAX_BATCH_EXPORT_ITEMS } from "@/types/export"; import { AlertDialog, AlertDialogAction, @@ -20,6 +21,7 @@ import useKeyboardListener from "@/hooks/use-keyboard-listener"; import { Trans, useTranslation } from "react-i18next"; import { toast } from "sonner"; import { useIsAdmin } from "@/hooks/use-is-admin"; +import MultiExportDialog from "../overlay/MultiExportDialog"; type ReviewActionGroupProps = { selectedReviews: ReviewSegment[]; @@ -164,6 +166,29 @@ export default function ReviewActionGroup({ )} )} + {selectedReviews.length >= 2 && + selectedReviews.length <= MAX_BATCH_EXPORT_ITEMS && ( + { + onClearSelected(); + pullLatestData(); + }} + > + + + )} - - ), - }); - setName(""); - setSelectedCaseId(undefined); - setRange(undefined); - setMode("none"); - } - }) - .catch((error) => { - const errorMessage = - error.response?.data?.message || - error.response?.data?.detail || - "Unknown error"; - toast.error( - t("export.toast.error.failed", { - error: errorMessage, - }), - { position: "top-center" }, - ); + ); + + toast.success(t("export.toast.queued"), { + position: "top-center", + action: ( + + + + ), }); - }, [camera, name, range, selectedCaseId, setRange, setName, setMode, t]); + setName(""); + setSelectedCaseId(undefined); + setSingleNewCaseName(""); + setSingleNewCaseDescription(""); + setRange(undefined); + setMode("none"); + return true; + } catch (error) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + toast.error( + t("export.toast.error.failed", { + error: errorMessage, + }), + { position: "top-center" }, + ); + return false; + } finally { + setIsStartingExport(false); + } + }, [ + camera, + isStartingExport, + name, + range, + selectedCaseId, + singleNewCaseDescription, + singleNewCaseName, + setMode, + setRange, + t, + ]); const handleCancel = useCallback(() => { setName(""); setSelectedCaseId(undefined); + setSingleNewCaseName(""); + setSingleNewCaseDescription(""); setMode("none"); setRange(undefined); + setActiveTab("export"); }, [setMode, setRange]); const Overlay = isDesktop ? Dialog : Drawer; @@ -150,16 +230,31 @@ export default function ExportDialog({ /> setShowPreview(true)} - onSave={() => onStartExport()} + onSave={() => { + if (mode == "timeline_multi") { + setActiveTab("multi"); + setMode("select"); + return; + } + + void onStartExport(); + }} onCancel={handleCancel} /> { if (!open) { - setMode("none"); + handleCancel(); } }} > @@ -171,22 +266,16 @@ export default function ExportDialog({ size="sm" onClick={() => { const now = new Date(latestTime * 1000); - let start = 0; now.setHours(now.getHours() - 1); - start = now.getTime() / 1000; + setActiveTab("export"); setRange({ before: latestTime, - after: start, + after: now.getTime() / 1000, }); setMode("select"); }} > - {isDesktop && ( -
- {t("menu.export", { ns: "common" })} -
- )} )} @@ -203,9 +292,16 @@ export default function ExportDialog({ range={range} name={name} selectedCaseId={selectedCaseId} + singleNewCaseName={singleNewCaseName} + singleNewCaseDescription={singleNewCaseDescription} + activeTab={activeTab} + isStartingExport={isStartingExport} onStartExport={onStartExport} + setActiveTab={setActiveTab} setName={setName} setSelectedCaseId={setSelectedCaseId} + setSingleNewCaseName={setSingleNewCaseName} + setSingleNewCaseDescription={setSingleNewCaseDescription} setRange={setRange} setMode={setMode} onCancel={handleCancel} @@ -222,29 +318,205 @@ type ExportContentProps = { range?: TimeRange; name: string; selectedCaseId?: string; - onStartExport: () => void; + singleNewCaseName: string; + singleNewCaseDescription: string; + activeTab: ExportTab; + isStartingExport: boolean; + onStartExport: () => Promise; + setActiveTab: (tab: ExportTab) => void; setName: (name: string) => void; setSelectedCaseId: (caseId: string | undefined) => void; + setSingleNewCaseName: (name: string) => void; + setSingleNewCaseDescription: (description: string) => void; setRange: (range: TimeRange | undefined) => void; setMode: (mode: ExportMode) => void; onCancel: () => void; }; + export function ExportContent({ latestTime, currentTime, range, name, selectedCaseId, + singleNewCaseName, + singleNewCaseDescription, + activeTab, + isStartingExport, onStartExport, + setActiveTab, setName, setSelectedCaseId, + setSingleNewCaseName, + setSingleNewCaseDescription, setRange, setMode, onCancel, }: ExportContentProps) { const { t } = useTranslation(["components/dialog"]); + const navigate = useNavigate(); + const isAdmin = useIsAdmin(); const [selectedOption, setSelectedOption] = useState("1"); - const { data: cases } = useSWR("cases"); + const { data: cases } = useSWR(isAdmin ? "cases" : null); + const { data: config } = useSWR("config"); + const [debouncedRange, setDebouncedRange] = useState( + range, + ); + const [selectedCameraIds, setSelectedCameraIds] = useState([]); + const [batchCaseSelection, setBatchCaseSelection] = useState( + selectedCaseId || "none", + ); + const [hasManualCameraSelection, setHasManualCameraSelection] = + useState(false); + const [newCaseName, setNewCaseName] = useState(""); + const [newCaseDescription, setNewCaseDescription] = useState(""); + const [isStartingBatchExport, setIsStartingBatchExport] = useState(false); + const multiRangeKey = useMemo(() => { + if (activeTab !== "multi" || !range) { + return undefined; + } + + return `${Math.round(range.after)}-${Math.round(range.before)}`; + }, [activeTab, range]); + + useEffect(() => { + if (activeTab !== "multi") { + setDebouncedRange(undefined); + return; + } + + if (!range) { + setDebouncedRange(undefined); + return; + } + + const timeoutId = window.setTimeout(() => { + setDebouncedRange(range); + }, 300); + + return () => window.clearTimeout(timeoutId); + }, [activeTab, range]); + + useEffect(() => { + if (activeTab !== "multi") { + return; + } + + if (selectedCaseId) { + setBatchCaseSelection(selectedCaseId); + return; + } + + if ((cases?.length ?? 0) === 0) { + setBatchCaseSelection("new"); + return; + } + + setBatchCaseSelection("new"); + }, [activeTab, cases?.length, selectedCaseId]); + + useEffect(() => { + setHasManualCameraSelection(false); + }, [multiRangeKey]); + + useEffect(() => { + if (activeTab !== "multi" || range) { + return; + } + + setRange({ + before: latestTime, + after: latestTime - 3600, + }); + }, [activeTab, latestTime, range, setRange]); + + const { data: events, isLoading: isEventsLoading } = useSWR( + activeTab === "multi" && debouncedRange + ? [ + "events", + { + after: Math.round(debouncedRange.after), + before: Math.round(debouncedRange.before), + limit: 500, + }, + ] + : null, + ); + + const cameraActivities = useMemo(() => { + const allCameraIds = Object.keys(config?.cameras ?? {}); + const byCamera = new Map(); + + events?.forEach((event) => { + const bucket = byCamera.get(event.camera); + if (bucket) { + bucket.push(event); + } else { + byCamera.set(event.camera, [event]); + } + }); + + const rangeStart = debouncedRange?.after ?? 0; + const rangeEnd = debouncedRange?.before ?? 0; + const rangeDuration = Math.max(1, rangeEnd - rangeStart); + + return allCameraIds.map((cameraId) => { + const cameraEvents = byCamera.get(cameraId) ?? []; + const segments = cameraEvents + .map((event) => { + // Event end_time is null for in-progress events; fall back to start. + const eventEnd = event.end_time ?? event.start_time; + const start = Math.max( + 0, + Math.min(1, (event.start_time - rangeStart) / rangeDuration), + ); + const end = Math.max( + 0, + Math.min(1, (eventEnd - rangeStart) / rangeDuration), + ); + return { start, end: Math.max(end, start) }; + }) + .sort((a, b) => a.start - b.start); + + return { + camera: cameraId, + count: cameraEvents.length, + hasDetections: cameraEvents.length > 0, + segments, + }; + }); + }, [config?.cameras, debouncedRange, events]); + + useEffect(() => { + if ( + activeTab !== "multi" || + !config || + isEventsLoading || + hasManualCameraSelection + ) { + return; + } + + setSelectedCameraIds( + cameraActivities + .filter((activity) => activity.hasDetections) + .map((activity) => activity.camera), + ); + }, [ + activeTab, + cameraActivities, + config, + hasManualCameraSelection, + isEventsLoading, + ]); + + const selectedCameraCount = selectedCameraIds.length; + const canStartBatchExport = + Boolean(range && range.before > range.after) && + selectedCameraCount > 0 && + !isStartingBatchExport && + (batchCaseSelection !== "new" || newCaseName.trim().length > 0) && + batchCaseSelection.length > 0; const onSelectTime = useCallback( (option: ExportOption) => { @@ -252,6 +524,7 @@ export function ExportContent({ const now = new Date(latestTime * 1000); let start = 0; + switch (option) { case "1": now.setHours(now.getHours() - 1); @@ -276,6 +549,8 @@ export function ExportContent({ case "custom": start = latestTime - 3600; break; + default: + start = latestTime - 3600; } setRange({ @@ -286,99 +561,486 @@ export function ExportContent({ [latestTime, setRange], ); + const toggleCameraSelection = useCallback((cameraId: string) => { + setHasManualCameraSelection(true); + setSelectedCameraIds((previous) => + previous.includes(cameraId) + ? previous.filter((selectedId) => selectedId !== cameraId) + : [...previous, cameraId], + ); + }, []); + + const startBatchExport = useCallback(async () => { + if (isStartingBatchExport) { + return; + } + + if (!range) { + toast.error(t("export.toast.error.noVaildTimeSelected"), { + position: "top-center", + }); + return; + } + + if (range.before <= range.after) { + toast.error(t("export.toast.error.endTimeMustAfterStartTime"), { + position: "top-center", + }); + return; + } + + const payload: BatchExportBody = { + items: selectedCameraIds.map((cameraId) => ({ + camera: cameraId, + start_time: Math.round(range.after), + end_time: Math.round(range.before), + friendly_name: name + ? `${name} - ${resolveCameraName(config, cameraId)}` + : undefined, + })), + }; + + if (isAdmin && batchCaseSelection !== "none") { + if (batchCaseSelection === "new") { + payload.new_case_name = newCaseName.trim(); + payload.new_case_description = newCaseDescription.trim() || undefined; + } else { + payload.export_case_id = batchCaseSelection; + } + } + + setIsStartingBatchExport(true); + + try { + const response = await axios.post( + "exports/batch", + payload, + ); + const results = response.data.results; + const successfulResults = results.filter((result) => result.success); + const failedResults = results.filter((result) => !result.success); + const failedSummary = failedResults + .map((result) => { + const cameraName = resolveCameraName(config, result.camera); + return result.error ? `${cameraName}: ${result.error}` : cameraName; + }) + .join(", "); + + if (failedResults.length > 0 && successfulResults.length > 0) { + toast.success( + t("export.toast.batchQueuedPartial", { + successful: successfulResults.length, + total: results.length, + failedCameras: failedResults + .map((result) => resolveCameraName(config, result.camera)) + .join(", "), + }), + { + position: "top-center", + description: failedSummary, + }, + ); + } else if (failedResults.length > 0) { + toast.error( + t("export.toast.batchQueueFailed", { + total: results.length, + failedCameras: failedResults + .map((result) => resolveCameraName(config, result.camera)) + .join(", "), + }), + { + position: "top-center", + description: failedSummary, + }, + ); + } else { + toast.success( + t("export.toast.batchQueuedSuccess", { + count: successfulResults.length, + }), + { position: "top-center" }, + ); + } + + if (successfulResults.length > 0) { + setName(""); + setSelectedCaseId(undefined); + setBatchCaseSelection("new"); + setNewCaseName(""); + setNewCaseDescription(""); + setRange(undefined); + setMode("none"); + setActiveTab("export"); + if (response.data.export_case_id) { + navigate(`/export?caseId=${response.data.export_case_id}`); + } + } + } catch (error) { + const apiError = error as { + response?: { data?: { message?: string; detail?: string } }; + }; + const errorMessage = + apiError.response?.data?.message || + apiError.response?.data?.detail || + "Unknown error"; + + toast.error( + t("export.toast.error.failed", { + error: errorMessage, + }), + { position: "top-center" }, + ); + } finally { + setIsStartingBatchExport(false); + } + }, [ + batchCaseSelection, + config, + isAdmin, + isStartingBatchExport, + name, + newCaseDescription, + newCaseName, + range, + selectedCameraIds, + setActiveTab, + setMode, + setName, + setRange, + setSelectedCaseId, + t, + navigate, + ]); + return ( -
+
{isDesktop && ( - <> - - {t("menu.export", { ns: "common" })} - - - + + {t("menu.export", { ns: "common" })} + )} - onSelectTime(value as ExportOption)} + + setActiveTab(value as ExportTab)} + className={cn("w-full", !isDesktop && "flex min-h-0 flex-1 flex-col")} > - {EXPORT_OPTIONS.map((opt) => { - return ( -
- - -
- ); - })} -
- {selectedOption == "custom" && ( - - )} - setName(e.target.value)} - /> -
- - -
+ {isNaN(parseInt(opt)) + ? opt == "timeline" + ? t("export.time.fromTimeline") + : t(`export.time.${opt}`) + : t("export.time.lastHour", { + count: parseInt(opt), + })} + +
+ ))} + + + {selectedOption == "custom" && ( + + )} + + setName(e.target.value)} + /> + + {isAdmin && ( +
+ + + {selectedCaseId === "new" && ( +
+ setSingleNewCaseName(e.target.value)} + /> +