diff --git a/Makefile b/Makefile index 7b4d4b314..3cf7d0fb8 100644 --- a/Makefile +++ b/Makefile @@ -15,10 +15,10 @@ amd64_ffmpeg: docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-amd64 --file docker/Dockerfile.ffmpeg.amd64 . nginx_frigate: - docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag blakeblackshear/frigate-nginx:1.0.0 --file docker/Dockerfile.nginx . + docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag blakeblackshear/frigate-nginx:1.0.2 --file docker/Dockerfile.nginx . amd64_frigate: version web - docker build --no-cache --tag frigate-base --build-arg ARCH=amd64 --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base . + docker build --no-cache --tag frigate-base --build-arg ARCH=amd64 --build-arg FFMPEG_VERSION=1.1.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base . docker build --no-cache --tag frigate --file docker/Dockerfile.amd64 . amd64_all: amd64_wheels amd64_ffmpeg amd64_frigate @@ -30,7 +30,7 @@ amd64nvidia_ffmpeg: docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-amd64nvidia --file docker/Dockerfile.ffmpeg.amd64nvidia . amd64nvidia_frigate: version web - docker build --no-cache --tag frigate-base --build-arg ARCH=amd64nvidia --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base . + docker build --no-cache --tag frigate-base --build-arg ARCH=amd64nvidia --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base . docker build --no-cache --tag frigate --file docker/Dockerfile.amd64nvidia . amd64nvidia_all: amd64nvidia_wheels amd64nvidia_ffmpeg amd64nvidia_frigate @@ -42,7 +42,7 @@ aarch64_ffmpeg: docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-aarch64 --file docker/Dockerfile.ffmpeg.aarch64 . aarch64_frigate: version web - docker build --no-cache --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base . + docker build --no-cache --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base . docker build --no-cache --tag frigate --file docker/Dockerfile.aarch64 . armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate @@ -54,7 +54,7 @@ armv7_ffmpeg: docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-armv7 --file docker/Dockerfile.ffmpeg.armv7 . armv7_frigate: version web - docker build --no-cache --tag frigate-base --build-arg ARCH=armv7 --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base . + docker build --no-cache --tag frigate-base --build-arg ARCH=armv7 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base . docker build --no-cache --tag frigate --file docker/Dockerfile.armv7 . 
armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate diff --git a/README.md b/README.md index 012a856af..94f80db2d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but - Uses a very low overhead motion detection to determine where to run object detection - Object detection with TensorFlow runs in separate processes for maximum FPS - Communicates over MQTT for easy integration into other systems -- Records video clips of detected objects +- Records video with retention settings based on detected objects - 24/7 recording - Re-streaming via RTMP to reduce the number of connections to your camera @@ -23,16 +23,20 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but View the documentation at https://blakeblackshear.github.io/frigate ## Donations + If you would like to make a donation to support development, please use [Github Sponsors](https://github.com/sponsors/blakeblackshear). ## Screenshots + Integration into Home Assistant +
Also comes with a builtin UI: +
diff --git a/docker/Dockerfile.aarch64 b/docker/Dockerfile.aarch64 index 5ce548f2d..30d69fc83 100644 --- a/docker/Dockerfile.aarch64 +++ b/docker/Dockerfile.aarch64 @@ -5,18 +5,24 @@ ENV DEBIAN_FRONTEND=noninteractive # Install packages for apt repo RUN apt-get -qq update \ && apt-get -qq install --no-install-recommends -y \ - # ffmpeg runtime dependencies - libgomp1 \ - # runtime dependencies - libopenexr24 \ - libgstreamer1.0-0 \ - libgstreamer-plugins-base1.0-0 \ - libopenblas-base \ - libjpeg-turbo8 \ - libpng16-16 \ - libtiff5 \ - libdc1394-22 \ - ## Tensorflow lite - && pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_aarch64.whl \ + # ffmpeg runtime dependencies + libgomp1 \ + # runtime dependencies + libopenexr24 \ + libgstreamer1.0-0 \ + libgstreamer-plugins-base1.0-0 \ + libopenblas-base \ + libjpeg-turbo8 \ + libpng16-16 \ + libtiff5 \ + libdc1394-22 \ && rm -rf /var/lib/apt/lists/* \ - && (apt-get autoremove -y; apt-get autoclean -y) \ No newline at end of file + && (apt-get autoremove -y; apt-get autoclean -y) + +# s6-overlay +ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-aarch64-installer /tmp/ +RUN chmod +x /tmp/s6-overlay-aarch64-installer && /tmp/s6-overlay-aarch64-installer / + +ENTRYPOINT ["/init"] + +CMD ["python3", "-u", "-m", "frigate"] \ No newline at end of file diff --git a/docker/Dockerfile.amd64 b/docker/Dockerfile.amd64 index cb02aaaa3..d583e43f8 100644 --- a/docker/Dockerfile.amd64 +++ b/docker/Dockerfile.amd64 @@ -16,9 +16,13 @@ RUN apt-get -qq update \ libgomp1 \ # VAAPI drivers for Intel hardware accel libva-drm2 libva2 libmfx1 i965-va-driver vainfo intel-media-va-driver-non-free mesa-vdpau-drivers mesa-va-drivers mesa-vdpau-drivers libdrm-radeon1 \ - ## Tensorflow lite - && wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \ - && python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \ - && rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \ && rm -rf /var/lib/apt/lists/* \ - && (apt-get autoremove -y; apt-get autoclean -y) \ No newline at end of file + && (apt-get autoremove -y; apt-get autoclean -y) + +# s6-overlay +ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-amd64-installer /tmp/ +RUN chmod +x /tmp/s6-overlay-amd64-installer && /tmp/s6-overlay-amd64-installer / + +ENTRYPOINT ["/init"] + +CMD ["python3", "-u", "-m", "frigate"] \ No newline at end of file diff --git a/docker/Dockerfile.amd64nvidia b/docker/Dockerfile.amd64nvidia index 8714f70ad..f893d684f 100644 --- a/docker/Dockerfile.amd64nvidia +++ b/docker/Dockerfile.amd64nvidia @@ -4,12 +4,8 @@ LABEL maintainer "blakeb@blakeshome.com" # Install packages for apt repo RUN apt-get -qq update \ && apt-get -qq install --no-install-recommends -y \ - # ffmpeg dependencies - libgomp1 \ - ## Tensorflow lite - && wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \ - && python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \ - && rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \ + # ffmpeg dependencies + libgomp1 \ && rm -rf /var/lib/apt/lists/* \ && (apt-get autoremove -y; apt-get autoclean -y) @@ -45,3 +41,11 @@ ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64 ENV NVIDIA_VISIBLE_DEVICES all ENV NVIDIA_DRIVER_CAPABILITIES 
compute,utility,video ENV NVIDIA_REQUIRE_CUDA "cuda>=11.1 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441 brand=tesla,driver>=450,driver<451" + +# s6-overlay +ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-amd64-installer /tmp/ +RUN chmod +x /tmp/s6-overlay-amd64-installer && /tmp/s6-overlay-amd64-installer / + +ENTRYPOINT ["/init"] + +CMD ["python3", "-u", "-m", "frigate"] \ No newline at end of file diff --git a/docker/Dockerfile.armv7 b/docker/Dockerfile.armv7 index 2e50cd22c..af44301b8 100644 --- a/docker/Dockerfile.armv7 +++ b/docker/Dockerfile.armv7 @@ -5,20 +5,26 @@ ENV DEBIAN_FRONTEND=noninteractive # Install packages for apt repo RUN apt-get -qq update \ && apt-get -qq install --no-install-recommends -y \ - # ffmpeg runtime dependencies - libgomp1 \ - # runtime dependencies - libopenexr24 \ - libgstreamer1.0-0 \ - libgstreamer-plugins-base1.0-0 \ - libopenblas-base \ - libjpeg-turbo8 \ - libpng16-16 \ - libtiff5 \ - libdc1394-22 \ - libaom0 \ - libx265-179 \ - ## Tensorflow lite - && pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_armv7l.whl \ + # ffmpeg runtime dependencies + libgomp1 \ + # runtime dependencies + libopenexr24 \ + libgstreamer1.0-0 \ + libgstreamer-plugins-base1.0-0 \ + libopenblas-base \ + libjpeg-turbo8 \ + libpng16-16 \ + libtiff5 \ + libdc1394-22 \ + libaom0 \ + libx265-179 \ && rm -rf /var/lib/apt/lists/* \ - && (apt-get autoremove -y; apt-get autoclean -y) \ No newline at end of file + && (apt-get autoremove -y; apt-get autoclean -y) + +# s6-overlay +ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-armhf-installer /tmp/ +RUN chmod +x /tmp/s6-overlay-armhf-installer && /tmp/s6-overlay-armhf-installer / + +ENTRYPOINT ["/init"] + +CMD ["python3", "-u", "-m", "frigate"] \ No newline at end of file diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base index f80e3c8f8..8b025a23c 100644 --- a/docker/Dockerfile.base +++ b/docker/Dockerfile.base @@ -10,10 +10,6 @@ FROM frigate-web as web FROM ubuntu:20.04 LABEL maintainer "blakeb@blakeshome.com" -# s6-overlay -ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.1/s6-overlay-amd64-installer /tmp/ -RUN chmod +x /tmp/s6-overlay-amd64-installer && /tmp/s6-overlay-amd64-installer / - COPY --from=ffmpeg /usr/local /usr/local/ COPY --from=wheels /wheels/. 
/wheels/ @@ -30,7 +26,7 @@ RUN apt-get -qq update \ && APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn apt-key adv --fetch-keys https://packages.cloud.google.com/apt/doc/apt-key.gpg \ && echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \ && echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections \ - && apt-get -qq update && apt-get -qq install --no-install-recommends -y libedgetpu1-max=15.0 \ + && apt-get -qq update && apt-get -qq install --no-install-recommends -y libedgetpu1-max python3-tflite-runtime python3-pycoral \ && rm -rf /var/lib/apt/lists/* /wheels \ && (apt-get autoremove -y; apt-get autoclean -y) @@ -57,7 +53,3 @@ COPY docker/rootfs/ / EXPOSE 5000 EXPOSE 1935 - -ENTRYPOINT ["/init"] - -CMD ["python3", "-u", "-m", "frigate"] diff --git a/docker/Dockerfile.nginx b/docker/Dockerfile.nginx index bfa7d277a..72e15f8e0 100644 --- a/docker/Dockerfile.nginx +++ b/docker/Dockerfile.nginx @@ -10,6 +10,7 @@ FROM base as build ARG NGINX_VERSION=1.18.0 ARG VOD_MODULE_VERSION=1.28 +ARG SECURE_TOKEN_MODULE_VERSION=1.4 ARG RTMP_MODULE_VERSION=1.2.1 RUN cp /etc/apt/sources.list /etc/apt/sources.list~ \ @@ -23,6 +24,10 @@ RUN apt-get -yqq install --no-install-recommends curl \ && curl -sL https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz | tar -C /tmp/nginx -zx --strip-components=1 \ && mkdir /tmp/nginx-vod-module \ && curl -sL https://github.com/kaltura/nginx-vod-module/archive/refs/tags/${VOD_MODULE_VERSION}.tar.gz | tar -C /tmp/nginx-vod-module -zx --strip-components=1 \ + # Patch MAX_CLIPS to allow more clips to be added than the default 128 + && sed -i 's/MAX_CLIPS (128)/MAX_CLIPS (1080)/g' /tmp/nginx-vod-module/vod/media_set.h \ + && mkdir /tmp/nginx-secure-token-module \ + && curl -sL https://github.com/kaltura/nginx-secure-token-module/archive/refs/tags/${SECURE_TOKEN_MODULE_VERSION}.tar.gz | tar -C /tmp/nginx-secure-token-module -zx --strip-components=1 \ && mkdir /tmp/nginx-rtmp-module \ && curl -sL https://github.com/arut/nginx-rtmp-module/archive/refs/tags/v${RTMP_MODULE_VERSION}.tar.gz | tar -C /tmp/nginx-rtmp-module -zx --strip-components=1 @@ -34,6 +39,7 @@ RUN ./configure --prefix=/usr/local/nginx \ --with-http_ssl_module \ --with-threads \ --add-module=../nginx-vod-module \ + --add-module=../nginx-secure-token-module \ --add-module=../nginx-rtmp-module \ --with-cc-opt="-O3 -Wno-error=implicit-fallthrough" diff --git a/docker/rootfs/usr/local/nginx/conf/nginx.conf b/docker/rootfs/usr/local/nginx/conf/nginx.conf index 7e372d722..259d2668a 100644 --- a/docker/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/rootfs/usr/local/nginx/conf/nginx.conf @@ -71,6 +71,9 @@ http { location /vod/ { vod hls; + secure_token $args; + secure_token_types application/vnd.apple.mpegurl; + add_header Access-Control-Allow-Headers '*'; add_header Access-Control-Expose-Headers 'Server,range,Content-Length,Content-Range'; add_header Access-Control-Allow-Methods 'GET, HEAD, OPTIONS'; @@ -122,6 +125,11 @@ http { root /media/frigate; } + location /cache/ { + internal; # This tells nginx it's not accessible from the outside + alias /tmp/cache/; + } + location /recordings/ { add_header 'Access-Control-Allow-Origin' "$http_origin" always; add_header 'Access-Control-Allow-Credentials' 'true'; diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 1db060e36..eafc91d99 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -16,7 
+16,7 @@ motion: # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. # The value should be between 1 and 255. threshold: 25 - # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.4% of the motion frame area) + # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.17% of the motion frame area) # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller # moving objects. contour_area: 100 @@ -29,7 +29,7 @@ motion: # Low values will cause things like moving shadows to be detected as motion for longer. # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ frame_alpha: 0.2 - # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 120) + # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 180) # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense of higher CPU usage. # Lower values result in less CPU, but small changes may not register as motion. frame_height: 180 @@ -81,15 +81,15 @@ environment_vars: ### `database` -Event and clip information is managed in a sqlite database at `/media/frigate/clips/frigate.db`. If that database is deleted, clips will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant. +Event and recording information is managed in a sqlite database at `/media/frigate/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant. -If you are storing your clips on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary. +If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary. -This may need to be in a custom location if network storage is used for clips. +This may need to be in a custom location if network storage is used for the media folder. ```yaml database: - path: /media/frigate/clips/frigate.db + path: /media/frigate/frigate.db ``` ### `detectors` @@ -110,10 +110,17 @@ detectors: ### `model` +If using a custom model, the width and height will need to be specified. + +The labelmap can be customized to your needs. A common reason to do this is to combine object types that are easily confused, such as car and truck, when you don't need that granularity. By default, truck is renamed to car because they are often confused. You cannot add new object types, but you can change the names of existing objects in the model.
+ ```yaml model: # Required: height of the trained model height: 320 # Required: width of the trained model width: 320 + # Optional: labelmap overrides + labelmap: + 7: car ``` diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index e22912105..7592a5560 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -5,16 +5,15 @@ title: Cameras ## Setting Up Camera Inputs -Up to 4 inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create clips from a higher resolution stream, or vice versa. +Up to 4 inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa. Each role can only be assigned to one input per camera. The options for roles are as follows: -| Role | Description | -| -------- | ------------------------------------------------------------------------------------ | -| `detect` | Main feed for object detection | -| `clips` | Clips of events from objects detected in the `detect` feed. [docs](#recording-clips) | -| `record` | Saves 60 second segments of the video feed. [docs](#247-recordings) | -| `rtmp` | Broadcast as an RTMP feed for other services to consume. [docs](#rtmp-streams) | +| Role | Description | +| -------- | ------------------------------------------------------------------------------------- | +| `detect` | Main feed for object detection | +| `record` | Saves segments of the video feed based on configuration settings. [docs](#recordings) | +| `rtmp` | Broadcast as an RTMP feed for other services to consume. [docs](#rtmp-streams) | ### Example @@ -31,13 +30,15 @@ cameras: - rtmp - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/live roles: - - clips - record - width: 1280 - height: 720 - fps: 5 + detect: + width: 1280 + height: 720 + fps: 5 ``` +`width`, `height`, and `fps` are only used for the `detect` role. Other streams are passed through, so there is no need to specify the resolution. + ## Masks & Zones ### Masks @@ -93,6 +94,9 @@ zones: # Required: List of x,y coordinates to define the polygon of the zone. # NOTE: Coordinates can be generated at https://www.image-map.net/ coordinates: 545,1077,747,939,788,805 + # Optional: List of objects that can trigger this zone (default: all tracked objects) + objects: + - person # Optional: Zone level object filters. # NOTE: The global and camera filters are applied upstream. filters: @@ -127,37 +131,48 @@ objects: mask: 0,0,1000,0,1000,200,0,200 ``` -## Clips +## Recordings -Frigate can save video clips without any CPU overhead for encoding by simply copying the stream directly with FFmpeg. It leverages FFmpeg's segment functionality to maintain a cache of video for each camera. The cache files are written to disk at `/tmp/cache` and do not introduce memory overhead. When an object is being tracked, it will extend the cache to ensure it can assemble a clip when the event ends. Once the event ends, it again uses FFmpeg to assemble a clip by combining the video clips without any encoding by the CPU. Assembled clips are are saved to `/media/frigate/clips`. Clips are retained according to the retention settings defined on the config for each object type. 
+24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config. -These clips will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264. +Exported clips are also created from these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed. + +These recordings will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264. :::caution Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set. ::: ```yaml -clips: - # Required: enables clips for the camera (default: shown below) - # This value can be set via MQTT and will be updated in startup based on retained value +record: + # Optional: Enable recording (default: shown below) enabled: False - # Optional: Number of seconds before the event to include in the clips (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include in the clips (default: shown below) - post_capture: 5 - # Optional: Objects to save clips for. (default: all tracked objects) - objects: - - person - # Optional: Restrict clips to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days + # Optional: Number of days to retain (default: shown below) + retain_days: 0 + # Optional: Event recording settings + events: + # Optional: Enable event recording retention settings (default: shown below) + enabled: False + # Optional: Maximum length of time to retain video during long events. (default: shown below) + # NOTE: If an object is being tracked for longer than this amount of time, the cache + # will begin to expire and the resulting clip will be the last x seconds of the event unless retain_days under record is > 0. + max_seconds: 300 + # Optional: Number of seconds before the event to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the event to include (default: shown below) + post_capture: 5 + # Optional: Objects to save events for. (default: all tracked objects) objects: - person: 15 + - person + # Optional: Restrict events to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Retention settings for events + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 ``` ## Snapshots @@ -194,23 +209,6 @@ snapshots: person: 15 ``` -## 24/7 Recordings - -24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser.
Each camera supports a configurable retention policy in the config. - -:::caution -Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set. -::: - -```yaml -# Optional: 24/7 recording configuration -record: - # Optional: Enable recording (default: global setting) - enabled: False - # Optional: Number of days to retain (default: global setting) - retain_days: 30 -``` - ## RTMP streams Frigate can re-stream your video feed as a RTMP feed for other applications such as Home Assistant to utilize it at `rtmp:///live/`. Port 1935 must be open. This allows you to use a video feed for detection in frigate and Home Assistant live view at the same time without having to make two separate connections to the camera. The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. @@ -263,8 +261,8 @@ cameras: # Required: the path to the stream # NOTE: Environment variables that begin with 'FRIGATE_' may be referenced in {} - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - # Required: list of roles for this stream. valid values are: detect,record,clips,rtmp - # NOTICE: In addition to assigning the record, clips, and rtmp roles, + # Required: list of roles for this stream. valid values are: detect,record,rtmp + # NOTICE: In addition to assigning the record and rtmp roles, # they must also be enabled in the camera config. roles: - detect @@ -284,14 +282,20 @@ cameras: # Optional: camera specific output args (default: inherit) output_args: - # Required: width of the frame for the input with the detect role - width: 1280 - # Required: height of the frame for the input with the detect role - height: 720 - # Optional: desired fps for your camera for the input with the detect role - # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. - # Frigate will attempt to autodetect if not specified. - fps: 5 + # Required: Camera level detect settings + detect: + # Required: width of the frame for the input with the detect role + width: 1280 + # Required: height of the frame for the input with the detect role + height: 720 + # Required: desired fps for your camera for the input with the detect role + # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. + fps: 5 + # Optional: enables detection for the camera (default: True) + # This value can be set via MQTT and will be updated in startup based on retained value + enabled: True + # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate) + max_disappeared: 25 # Optional: camera level motion config motion: @@ -312,6 +316,9 @@ cameras: # Required: List of x,y coordinates to define the polygon of the zone. # NOTE: Coordinates can be generated at https://www.image-map.net/ coordinates: 545,1077,747,939,788,805 + # Optional: List of objects that can trigger this zone (default: all tracked objects) + objects: + - person # Optional: Zone level object filters. # NOTE: The global and camera filters are applied upstream.
filters: @@ -320,42 +327,33 @@ cameras: max_area: 100000 threshold: 0.7 - # Optional: Camera level detect settings - detect: - # Optional: enables detection for the camera (default: True) - # This value can be set via MQTT and will be updated in startup based on retained value - enabled: True - # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate) - max_disappeared: 25 - - # Optional: save clips configuration - clips: - # Required: enables clips for the camera (default: shown below) - # This value can be set via MQTT and will be updated in startup based on retained value - enabled: False - # Optional: Number of seconds before the event to include in the clips (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include in the clips (default: shown below) - post_capture: 5 - # Optional: Objects to save clips for. (default: all tracked objects) - objects: - - person - # Optional: Restrict clips to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 - # Optional: 24/7 recording configuration record: # Optional: Enable recording (default: global setting) enabled: False # Optional: Number of days to retain (default: global setting) retain_days: 30 + # Optional: Event recording settings + events: + # Required: enables event recordings for the camera (default: shown below) + # This value can be set via MQTT and will be updated in startup based on retained value + enabled: False + # Optional: Number of seconds before the event to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the event to include (default: shown below) + post_capture: 5 + # Optional: Objects to save events for. (default: all tracked objects) + objects: + - person + # Optional: Restrict events to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Camera override for retention settings (default: global values) + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 # Optional: RTMP re-stream configuration rtmp: @@ -364,7 +362,7 @@ cameras: # Optional: Live stream configuration for WebUI live: - # Optional: Set the height of the live stream. (default: detect stream height) + # Optional: Set the height of the live stream. (default: 720) # This must be less than or equal to the height of the detect stream. Lower resolutions # reduce bandwidth required for viewing the live stream. Width is computed to match known aspect ratio. height: 720 @@ -483,12 +481,11 @@ input_args: - "1" ``` -Note that mjpeg cameras require encoding the video into h264 for clips, recording, and rtmp roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. +Note that mjpeg cameras require encoding the video into h264 for recording and rtmp roles. This will use significantly more CPU than if the cameras supported h264 feeds directly.
```yaml output_args: record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v libx264 -an - clips: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v libx264 -an rtmp: -c:v libx264 -an -f flv ``` diff --git a/docs/docs/configuration/detectors.md b/docs/docs/configuration/detectors.md index a7bbdab79..58ec474ef 100644 --- a/docs/docs/configuration/detectors.md +++ b/docs/docs/configuration/detectors.md @@ -30,6 +30,15 @@ detectors: device: usb:1 ``` +Native Coral (Dev Board): + +```yaml +detectors: + coral: + type: edgetpu + device: '' +``` + Multiple PCIE/M.2 Corals: ```yaml diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 92cea7d4b..9128fead5 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -20,9 +20,10 @@ cameras: roles: - detect - rtmp - width: 1280 - height: 720 - fps: 5 + detect: + width: 1280 + height: 720 + fps: 5 ``` ## Required @@ -76,22 +77,103 @@ cameras: roles: - detect - rtmp - width: 1280 - height: 720 - fps: 5 + detect: + width: 1280 + height: 720 + fps: 5 ``` ## Optional -### `clips` +### `database` ```yaml -clips: - # Optional: Maximum length of time to retain video during long events. (default: shown below) - # NOTE: If an object is being tracked for longer than this amount of time, the cache - # will begin to expire and the resulting clip will be the last x seconds of the event. - max_seconds: 300 - # Optional: Retention settings for clips (default: shown below) +database: + # The path to store the SQLite DB (default: shown below) + path: /media/frigate/frigate.db +``` + +### `model` + +```yaml +# Optional: model modifications +model: + # Required: Object detection model input width (default: shown below) + width: 320 + # Required: Object detection model input height (default: shown below) + height: 320 + # Optional: Label name modifications + labelmap: + 2: vehicle # previously "car" +``` + +### `detectors` + +Check the [detectors configuration page](detectors.md) for a complete list of options. + +### `logger` + +```yaml +# Optional: logger verbosity settings +logger: + # Optional: Default log verbosity (default: shown below) + default: info + # Optional: Component specific logger overrides + logs: + frigate.event: debug +``` + +### `record` + +Can be overridden at the camera level. 24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config. + +Exported clips are also created from these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed. + +These recordings will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264. + +:::caution +Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set.
+::: + +```yaml +record: + # Optional: Enable recording (default: shown below) + enabled: False + # Optional: Number of days to retain (default: shown below) + retain_days: 0 + # Optional: Event recording settings + events: + # Optional: Enable event recording retention settings (default: shown below) + enabled: False + # Optional: Maximum length of time to retain video during long events. (default: shown below) + # NOTE: If an object is being tracked for longer than this amount of time, the cache + # will begin to expire and the resulting clip will be the last x seconds of the event unless retain_days under record is > 0. + max_seconds: 300 + # Optional: Number of seconds before the event to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the event to include (default: shown below) + post_capture: 5 + # Optional: Objects to save recordings for. (default: all tracked objects) + objects: + - person + # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Retention settings for events + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 +``` + +### `snapshots` + +Can be overridden at the camera level. Global snapshot retention settings. + +```yaml +# Optional: Configuration for the jpg snapshots written to the clips directory for each event +snapshots: retain: # Required: Default retention days (default: shown below) default: 10 @@ -102,6 +184,8 @@ clips: ### `ffmpeg` +Can be overridden at the camera level. + ```yaml ffmpeg: # Optional: global ffmpeg args (default: shown below) @@ -117,8 +201,6 @@ ffmpeg: detect: -f rawvideo -pix_fmt yuv420p # Optional: output args for record streams (default: shown below) record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an - # Optional: output args for clips streams (default: shown below) - clips: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an # Optional: output args for rtmp streams (default: shown below) rtmp: -c copy -f flv ``` @@ -145,22 +227,6 @@ objects: threshold: 0.7 ``` -### `record` - -Can be overridden at the camera level. 24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config. - -:::caution -Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set. -::: - -```yaml -record: - # Optional: Enable recording - enabled: False - # Optional: Number of days to retain - retain_days: 30 -``` - ### `birdseye` A dynamic combined camera view of all tracked cameras. This is optimized for minimal bandwidth and server resource utilization. Encoding is only performed when actively viewing the video feed, and only active (defined by the mode) cameras are included in the view.
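As a sketch of what a birdseye block might look like in the config (the option names and defaults here are illustrative assumptions based on the description above, not taken from this diff):

```yaml
birdseye:
  # Optional: enable the birdseye view (assumed default)
  enabled: True
  # Optional: resolution of the combined output feed (assumed defaults)
  width: 1280
  height: 720
  # Optional: encoding quality of the feed, lower is higher quality (assumed default)
  quality: 8
  # Optional: which cameras are included: objects, motion, or continuous (assumed)
  mode: objects
```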
diff --git a/docs/docs/configuration/objects.mdx b/docs/docs/configuration/objects.mdx index 3e95f9e83..a8608c286 100644 --- a/docs/docs/configuration/objects.mdx +++ b/docs/docs/configuration/objects.mdx @@ -4,13 +4,13 @@ title: Default available objects sidebar_label: Available objects --- -import labels from '../../../labelmap.txt'; +import labels from "../../../labelmap.txt"; By default, Frigate includes the following object models from the Google Coral test data. <ul> -  {labels.split('\n').map((label) => ( -    <li>{label.replace(/^\d+\s+/, '')}</li> +  {labels.split("\n").map((label) => ( +    <li>{label.replace(/^\d+\s+/, "")}</li> ))} </ul> @@ -23,14 +23,3 @@ Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use yo - Labels: `/labelmap.txt` You also need to update the model width/height in the config if they differ from the defaults. - -### Customizing the Labelmap - -The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. You must retain the same number of labels, but you can change the names. To change: - -- Download the [COCO labelmap](https://dl.google.com/coral/canned_models/coco_labels.txt) -- Modify the label names as desired. For example, change `7 truck` to `7 car` -- Mount the new file at `/labelmap.txt` in the container with an additional volume - ``` - -v ./config/labelmap.txt:/labelmap.txt - ``` diff --git a/docs/docs/contributing.md b/docs/docs/contributing.md index cfd70ad66..a818000b4 100644 --- a/docs/docs/contributing.md +++ b/docs/docs/contributing.md @@ -63,17 +63,17 @@ cameras: roles: - detect - rtmp - - clips - height: 1080 - width: 1920 - fps: 5 + detect: + height: 1080 + width: 1920 + fps: 5 ``` These input args tell ffmpeg to read the mp4 file in an infinite loop. You can use any valid ffmpeg input here. #### 3. Gather some mp4 files for testing -Create and place these files in a `debug` folder in the root of the repo. This is also where clips and recordings will be created if you enable them in your test config. Update your config from step 2 above to point at the right file. You can check the `docker-compose.yml` file in the repo to see how the volumes are mapped. +Create and place these files in a `debug` folder in the root of the repo. This is also where recordings will be created if you enable them in your test config. Update your config from step 2 above to point at the right file. You can check the `docker-compose.yml` file in the repo to see how the volumes are mapped. #### 4. Open the repo with Visual Studio Code diff --git a/docs/docs/hardware.md b/docs/docs/hardware.md index 5cf20df87..a68a5e120 100644 --- a/docs/docs/hardware.md +++ b/docs/docs/hardware.md @@ -5,7 +5,7 @@ title: Recommended hardware ## Cameras -Cameras that output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. It is also helpful if your camera supports multiple substreams to allow different resolutions to be used for detection, streaming, clips, and recordings without re-encoding. +Cameras that output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. It is also helpful if your camera supports multiple substreams to allow different resolutions to be used for detection, streaming, and recordings without re-encoding. ## Computer diff --git a/docs/docs/installation.md b/docs/docs/installation.md index c3f916ad1..67e8252ae 100644 --- a/docs/docs/installation.md +++ b/docs/docs/installation.md @@ -5,7 +5,7 @@ title: Installation Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). See instructions below for installing the HassOS addon. -For Home Assistant users, there is also a [custom component (aka integration)](https://github.com/blakeblackshear/frigate-hass-integration).
This custom component adds tighter integration with Home Assistant by automatically setting up camera entities, sensors, media browser for clips and recordings, and a public API to simplify notifications. +For Home Assistant users, there is also a [custom component (aka integration)](https://github.com/blakeblackshear/frigate-hass-integration). This custom component adds tighter integration with Home Assistant by automatically setting up camera entities, sensors, media browser for recordings, and a public API to simplify notifications. Note that HassOS Addons and custom components are different things. If you are already running Frigate with Docker directly, you do not need the Addon since the Addon would run another instance of Frigate. diff --git a/docs/docs/troubleshooting.md b/docs/docs/troubleshooting.md index d7d12afa2..15ec9332b 100644 --- a/docs/docs/troubleshooting.md +++ b/docs/docs/troubleshooting.md @@ -3,25 +3,27 @@ id: troubleshooting title: Troubleshooting and FAQ --- -### How can I get sound or audio in my clips and recordings? -By default, Frigate removes audio from clips and recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](/frigate/configuration/index#ffmpeg). +### I am seeing a solid green image for my camera. + +A solid green image means that frigate has not received any frames from ffmpeg. Check the logs to see why ffmpeg is exiting and adjust your ffmpeg args accordingly. + +### How can I get sound or audio in my recordings? + +By default, Frigate removes audio from recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](/frigate/configuration/index#ffmpeg). ### My mjpeg stream or snapshots look green and crazy + This almost always means that the width/height defined for your camera are not correct. Double check the resolution with vlc or another player. Also make sure you don't have the width and height values backwards. ![mismatched-resolution](/img/mismatched-resolution.jpg) -### I have clips and snapshots in my clips folder, but I can't view them in the Web UI. -This is usually caused one of two things: - -- The permissions on the parent folder don't have execute and nginx returns a 403 error you can see in the browser logs - - In this case, try mounting a volume to `/media/frigate` inside the container instead of `/media/frigate/clips`. - -- Your cameras do not send h264 encoded video and the mp4 files are not playable in the browser +### I can't view events or recordings in the Web UI. +Ensure your cameras send h264 encoded video. ### "[mov,mp4,m4a,3gp,3g2,mj2 @ 0x5639eeb6e140] moov atom not found" -These messages in the logs are expected in certain situations. Frigate checks the integrity of the video cache before assembling clips. +These messages in the logs are expected in certain situations. Frigate checks the integrity of the recordings before storing them.
Occasionally these cached files will be invalid and cleaned up automatically. ### "On connect called" diff --git a/docs/docs/usage/api.md b/docs/docs/usage/api.md index 86824b5a8..b5d0eaa94 100644 --- a/docs/docs/usage/api.md +++ b/docs/docs/usage/api.md @@ -206,10 +206,6 @@ Accepts the following query string parameters, but they are only applied when an | `crop` | int | Crop the snapshot to the (0 or 1) | | `quality` | int | Jpeg encoding quality (0-100). Defaults to 70. | -### `/clips/-.mp4` - -Video clip for the given camera and event id. - ### `/clips/-.jpg` JPG snapshot for the given camera and event id. diff --git a/docs/docs/usage/home-assistant.md b/docs/docs/usage/home-assistant.md index 45764e01a..5342ec98a 100644 --- a/docs/docs/usage/home-assistant.md +++ b/docs/docs/usage/home-assistant.md @@ -4,31 +4,93 @@ title: Integration with Home Assistant sidebar_label: Home Assistant --- -The best way to integrate with Home Assistant is to use the [official integration](https://github.com/blakeblackshear/frigate-hass-integration). When configuring the integration, you will be asked for the `Host` of your frigate instance. This value should be the url you use to access Frigate in the browser and will look like `http://:5000/`. If you are using HassOS with the addon, the host should be `http://ccab4aaf-frigate:5000` (or `http://ccab4aaf-frigate-beta:5000` if your are using the beta version of the addon). Home Assistant needs access to port 5000 (api) and 1935 (rtmp) for all features. The integration will setup the following entities within Home Assistant: +The best way to integrate with Home Assistant is to use the [official integration](https://github.com/blakeblackshear/frigate-hass-integration). -## Sensors: +## Installation -- Stats to monitor frigate performance -- Object counts for all zones and cameras +Available via HACS as a [custom repository](https://hacs.xyz/docs/faq/custom_repositories). To install: -## Cameras: +- Add the custom repository: -- Cameras for image of the last detected object for each camera -- Camera entities with stream support (requires RTMP) +``` +Home Assistant > HACS > Integrations > [...] > Custom Repositories +``` -## Media Browser: +| Key | Value | +| -------------- | ----------------------------------------------------------- | +| Repository URL | https://github.com/blakeblackshear/frigate-hass-integration | +| Category | Integration | -- Rich UI with thumbnails for browsing event clips +- Use [HACS](https://hacs.xyz/) to install the integration: + +``` +Home Assistant > HACS > Integrations > "Explore & Add Integrations" > Frigate +``` + +- Restart Home Assistant. +- Then add/configure the integration: + +``` +Home Assistant > Configuration > Integrations > Add Integration > Frigate +``` + +Note: You will also need +[media_source](https://www.home-assistant.io/integrations/media_source/) enabled +in your Home Assistant configuration for the Media Browser to appear. 
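As a quick sketch, enabling it in `configuration.yaml` might look like this (illustrative only; most installs already include it via `default_config`):

```yaml
# configuration.yaml
# media_source is already included if you use default_config:
default_config:

# otherwise, enable it explicitly:
media_source:
```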
+ +## Configuration + +When configuring the integration, you will be asked for the following parameters: + +| Variable | Description | +| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| URL | The `URL` of your frigate instance, the URL you use to access Frigate in the browser. This may look like `http://:5000/`. If you are using HassOS with the addon, the URL should be `http://ccab4aaf-frigate:5000` (or `http://ccab4aaf-frigate-beta:5000` if you are using the beta version of the addon). Live streams require port 1935; see [RTMP streams](#streams) | + + + +## Options + +``` +Home Assistant > Configuration > Integrations > Frigate > Options +``` + +| Option | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| RTMP URL Template | A [jinja2](https://jinja.palletsprojects.com/) template that is used to override the standard RTMP stream URL (e.g. for use with reverse proxies). This option is only shown to users who have [advanced mode](https://www.home-assistant.io/blog/2019/07/17/release-96/#advanced-mode) enabled. See [RTMP streams](#streams) below. | + +## Entities Provided + +| Platform | Description | +| --------------- | --------------------------------------------------------------------------------- | +| `camera` | Live camera stream (requires RTMP), camera for image of the last detected object. | +| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. | +| `switch` | Switch entities to toggle detection, recordings and snapshots. | +| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. | + +## Media Browser Support + +The integration provides: + +- Rich UI with thumbnails for browsing event recordings - Rich UI for browsing 24/7 recordings by month, day, camera, time -## API: +This is accessible via "Media Browser" on the left menu panel in Home Assistant. + + + +## API - Notification API with public facing endpoints for images in notifications ### Notifications -Frigate publishes event information in the form of a change feed via MQTT. This allows lots of customization for notifications to meet your needs. Event changes are published with `before` and `after` information as shown [here](#frigateevents). -Note that some people may not want to expose frigate to the web, so you can leverage the HA API that frigate custom_integration ties into (which is exposed to the web, and thus can be used for mobile notifications etc): +Frigate publishes event information in the form of a change feed via MQTT. This +allows lots of customization for notifications to meet your needs. Event changes +are published with `before` and `after` information as shown +[here](#frigateevents).
Note that some people may not want to expose frigate to +the web, so you can leverage the HA API that the frigate custom_integration ties +into (which is exposed to the web, and thus can be used for mobile notifications +etc): To load an image taken by frigate from Home Assistants API see below: @@ -57,6 +119,7 @@ automation: data: image: 'https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg?format=android' tag: '{{trigger.payload_json["after"]["id"]}}' + when: '{{trigger.payload_json["after"]["start_time"]|int}}' ``` ```yaml @@ -75,6 +138,7 @@ automation: data: image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg" tag: "{{trigger.payload_json['after']['id']}}" + when: '{{trigger.payload_json["after"]["start_time"]|int}}' ``` ```yaml @@ -93,6 +157,7 @@ automation: data: image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg" tag: "{{trigger.payload_json['after']['id']}}" + when: '{{trigger.payload_json["after"]["start_time"]|int}}' ``` ```yaml @@ -111,6 +176,7 @@ automation: data: image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg" tag: "{{trigger.payload_json['after']['id']}}" + when: '{{trigger.payload_json["after"]["start_time"]|int}}' ``` If you are using telegram, you can fetch the image directly from Frigate: @@ -131,3 +197,85 @@ automation: - url: 'http://ccab4aaf-frigate:5000/api/events/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg' caption: 'A {{trigger.payload_json["after"]["label"]}} was detected on {{ trigger.payload_json["after"]["camera"] }} camera' ``` + + + +## RTMP stream + +In order for the live streams to function, they need to be accessible on the RTMP +port (default: `1935`) at `:1935`. Home Assistant will directly +connect to that streaming port when the live camera is viewed. + +#### RTMP URL Template + +For advanced use cases, this behavior can be changed with the [RTMP URL +template](#options) option. When set, this string will override the default stream +address that is derived from the default behavior described above. This option supports +[jinja2 templates](https://jinja.palletsprojects.com/) and has the `camera` dict +variables from [Frigate API](https://blakeblackshear.github.io/frigate/usage/api#apiconfig) +available for the template. Note that no Home Assistant state is available to the +template, only the camera dict from Frigate. + +This is potentially useful when Frigate is behind a reverse proxy, and/or when +the default stream port is otherwise not accessible to Home Assistant (e.g. +firewall rules). + +###### RTMP URL Template Examples + +Use a different port number: + +``` +rtmp://:2000/live/front_door +``` + +Use the camera name in the stream URL: + +``` +rtmp://:2000/live/{{ name }} +``` + +Use the camera name in the stream URL, converting it to lowercase first: + +``` +rtmp://:2000/live/{{ name|lower }} +``` + +## Multiple Instance Support + +The Frigate integration seamlessly supports the use of multiple Frigate servers. + +### Requirements for Multiple Instances + +In order for multiple Frigate instances to function correctly, the +`topic_prefix` and `client_id` parameters must be set differently per server. +See [MQTT +configuration](https://blakeblackshear.github.io/frigate/configuration/index#mqtt) +for how to set these.
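As an illustration, two instances could be distinguished like this (hypothetical values; only `topic_prefix` and `client_id` need to differ):

```yaml
# frigate config for the first instance
mqtt:
  host: mqtt.local
  topic_prefix: frigate_front
  client_id: frigate_front
```

```yaml
# frigate config for the second instance
mqtt:
  host: mqtt.local
  topic_prefix: frigate_back
  client_id: frigate_back
```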
+ +#### API URLs + +When multiple Frigate instances are configured, [API](#api) URLs should include an +identifier to tell Home Assistant which Frigate instance to refer to. The +identifier used is the MQTT `client_id` parameter included in the configuration, +and is used like so: + +``` +https://HA_URL/api/frigate//notifications//thumbnail.jpg +``` + +``` +https://HA_URL/api/frigate//clips/front_door-1624599978.427826-976jaa.mp4 +``` + +#### Default Treatment + +When a single Frigate instance is configured, the `client_id` parameter need not +be specified in URLs/identifiers -- that single instance is assumed. When +multiple Frigate instances are configured, the user **must** explicitly specify +which server they are referring to. + +## FAQ + +### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit? + +The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera. diff --git a/docs/docs/usage/mqtt.md b/docs/docs/usage/mqtt.md index 27712c124..73ab23bb8 100644 --- a/docs/docs/usage/mqtt.md +++ b/docs/docs/usage/mqtt.md @@ -11,6 +11,10 @@ Designed to be used as an availability topic with Home Assistant. Possible messa "online": published when frigate is running (on startup) "offline": published right before frigate stops +### `frigate/restart` + +Causes frigate to exit. Docker should be configured to automatically restart the container on exit. + ### `frigate//` Publishes the count of objects for the camera for use as a sensor in Home Assistant. @@ -84,13 +88,13 @@ Topic to turn detection for a camera on and off. Expected values are `ON` and `O Topic with current state of detection for a camera. Published values are `ON` and `OFF`. -### `frigate//clips/set` +### `frigate//recordings/set` -Topic to turn clips for a camera on and off. Expected values are `ON` and `OFF`. +Topic to turn recordings for a camera on and off. Expected values are `ON` and `OFF`. -### `frigate//clips/state` +### `frigate//recordings/state` -Topic with current state of clips for a camera. Published values are `ON` and `OFF`. +Topic with current state of recordings for a camera. Published values are `ON` and `OFF`. ### `frigate//snapshots/set` diff --git a/frigate/app.py b/frigate/app.py index 1d3d7bda5..bf3f12989 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -23,7 +23,7 @@ from frigate.models import Event, Recordings from frigate.mqtt import create_mqtt_client, MqttSocketRelay from frigate.object_processing import TrackedObjectProcessor from frigate.output import output_frames -from frigate.record import RecordingMaintainer +from frigate.record import RecordingCleanup, RecordingMaintainer from frigate.stats import StatsEmitter, stats_init from frigate.video import capture_camera, track_camera from frigate.watchdog import FrigateWatchdog @@ -90,15 +90,6 @@ class FrigateApp: assigned_roles = list( set([r for i in camera.ffmpeg.inputs for r in i.roles]) ) - if not camera.clips.enabled and "clips" in assigned_roles: - logger.warning( - f"Camera {name} has clips assigned to an input, but clips is not enabled."
- ) - elif camera.clips.enabled and not "clips" in assigned_roles: - logger.warning( - f"Camera {name} has clips enabled, but clips is not assigned to an input." - ) - if not camera.record.enabled and "record" in assigned_roles: logger.warning( f"Camera {name} has record assigned to an input, but record is not enabled." @@ -259,6 +250,7 @@ class FrigateApp: name, config, model_shape, + self.config.model.merged_labelmap, self.detection_queue, self.detection_out_events[name], self.detected_frames_queue, @@ -300,6 +292,10 @@ class FrigateApp: self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event) self.recording_maintainer.start() + def start_recording_cleanup(self): + self.recording_cleanup = RecordingCleanup(self.config, self.stop_event) + self.recording_cleanup.start() + def start_stats_emitter(self): self.stats_emitter = StatsEmitter( self.config, @@ -345,6 +341,7 @@ class FrigateApp: self.start_event_processor() self.start_event_cleanup() self.start_recording_maintainer() + self.start_recording_cleanup() self.start_stats_emitter() self.start_watchdog() # self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id) @@ -371,6 +368,7 @@ class FrigateApp: self.event_processor.join() self.event_cleanup.join() self.recording_maintainer.join() + self.recording_cleanup.join() self.stats_emitter.join() self.frigate_watchdog.join() self.db.stop() diff --git a/frigate/config.py b/frigate/config.py index 55b421fcd..ea6ea3280 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -1,18 +1,19 @@ from __future__ import annotations -from enum import Enum import json import logging import os +from enum import Enum from typing import Dict, List, Optional, Tuple, Union import matplotlib.pyplot as plt import numpy as np +import yaml from pydantic import BaseModel, Field, validator from pydantic.fields import PrivateAttr -import yaml -from frigate.const import BASE_DIR, RECORD_DIR, CACHE_DIR +from frigate.const import BASE_DIR, CACHE_DIR, RECORD_DIR +from frigate.edgetpu import load_labels from frigate.util import create_mask, deep_merge logger = logging.getLogger(__name__) @@ -25,7 +26,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S" FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} DEFAULT_TRACKED_OBJECTS = ["person"] -DEFAULT_DETECTORS = {"coral": {"type": "edgetpu", "device": "usb"}} +DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}} class DetectorTypeEnum(str, Enum): @@ -34,9 +35,7 @@ class DetectorTypeEnum(str, Enum): class DetectorConfig(BaseModel): - type: DetectorTypeEnum = Field( - default=DetectorTypeEnum.edgetpu, title="Detector Type" - ) + type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type") device: str = Field(default="usb", title="Device Type") num_threads: int = Field(default=3, title="Number of detection threads") @@ -68,13 +67,32 @@ class RetainConfig(BaseModel): ) +# DEPRECATED: Will eventually be removed class ClipsConfig(BaseModel): + enabled: bool = Field(default=False, title="Save clips.") max_seconds: int = Field(default=300, title="Maximum clip duration.") + pre_capture: int = Field(default=5, title="Seconds to capture before event starts.") + post_capture: int = Field(default=5, title="Seconds to capture after event ends.") + required_zones: List[str] = Field( + default_factory=list, + title="List of required zones to be entered in order to save the clip.", + ) + objects: Optional[List[str]] = Field( + title="List of objects to be detected in order to save the clip.", + ) retain: RetainConfig = 
Field( default_factory=RetainConfig, title="Clip retention settings." ) +class RecordConfig(BaseModel): + enabled: bool = Field(default=False, title="Enable record on all cameras.") + retain_days: int = Field(default=0, title="Recording retention period in days.") + events: ClipsConfig = Field( + default_factory=ClipsConfig, title="Event specific settings." + ) + + class MotionConfig(BaseModel): threshold: int = Field( default=25, @@ -99,11 +117,13 @@ class RuntimeMotionConfig(MotionConfig): frame_shape = config.get("frame_shape", (1, 1)) if "frame_height" not in config: - config["frame_height"] = max(frame_shape[0] // 6, 120) + config["frame_height"] = max(frame_shape[0] // 6, 180) if "contour_area" not in config: frame_width = frame_shape[1] * config["frame_height"] / frame_shape[0] - config["contour_area"] = config["frame_height"] * frame_width * 0.003912363 + config["contour_area"] = ( + config["frame_height"] * frame_width * 0.00173611111 + ) mask = config.get("mask", "") config["raw_mask"] = mask @@ -129,6 +149,9 @@ class RuntimeMotionConfig(MotionConfig): class DetectConfig(BaseModel): + height: int = Field(title="Height of the stream for the detect role.") + width: int = Field(title="Width of the stream for the detect role.") + fps: int = Field(title="Number of frames per second to process through detection.") enabled: bool = Field(default=True, title="Detection Enabled.") max_disappeared: Optional[int] = Field( title="Maximum number of frames the object can dissapear before detection ends." @@ -185,6 +208,10 @@ class ZoneConfig(BaseModel): coordinates: Union[str, List[str]] = Field( title="Coordinates polygon for the defined zone." ) + objects: List[str] = Field( + default_factory=list, + title="List of objects that can trigger the zone.", + ) _color: Optional[Tuple[int, int, int]] = PrivateAttr() _contour: np.ndarray = PrivateAttr() @@ -257,26 +284,11 @@ FFMPEG_INPUT_ARGS_DEFAULT = [ ] DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"] RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"] -SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = [ - "-f", - "segment", - "-segment_time", - "10", - "-segment_format", - "mp4", - "-reset_timestamps", - "1", - "-strftime", - "1", - "-c", - "copy", - "-an", -] RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [ "-f", "segment", "-segment_time", - "60", + "10", "-segment_format", "mp4", "-reset_timestamps", @@ -298,10 +310,6 @@ class FfmpegOutputArgsConfig(BaseModel): default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT, title="Record role FFmpeg output arguments.", ) - clips: Union[str, List[str]] = Field( - default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT, - title="Clips role FFmpeg output arguments.", - ) rtmp: Union[str, List[str]] = Field( default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT, title="RTMP role FFmpeg output arguments.", @@ -340,18 +348,6 @@ class CameraInput(BaseModel): class CameraFfmpegConfig(FfmpegConfig): inputs: List[CameraInput] = Field(title="Camera inputs.") - global_args: Union[str, List[str]] = Field( - default_factory=list, title="FFmpeg global arguments." - ) - hwaccel_args: Union[str, List[str]] = Field( - default_factory=list, title="FFmpeg hardware acceleration arguments." - ) - input_args: Union[str, List[str]] = Field( - default_factory=list, title="FFmpeg input arguments." - ) - output_args: FfmpegOutputArgsConfig = Field( - default_factory=FfmpegOutputArgsConfig, title="FFmpeg output arguments." 
- ) @validator("inputs") def validate_roles(cls, v): @@ -428,43 +424,18 @@ class CameraMqttConfig(BaseModel): ) -class CameraClipsConfig(BaseModel): - enabled: bool = Field(default=False, title="Save clips.") - pre_capture: int = Field(default=5, title="Seconds to capture before event starts.") - post_capture: int = Field(default=5, title="Seconds to capture after event ends.") - required_zones: List[str] = Field( - default_factory=list, - title="List of required zones to be entered in order to save the clip.", - ) - objects: Optional[List[str]] = Field( - title="List of objects to be detected in order to save the clip.", - ) - retain: RetainConfig = Field(default_factory=RetainConfig, title="Clip retention.") - - class CameraRtmpConfig(BaseModel): enabled: bool = Field(default=True, title="RTMP restreaming enabled.") class CameraLiveConfig(BaseModel): - height: Optional[int] = Field(title="Live camera view height") - width: Optional[int] = Field(title="Live camera view width") + height: int = Field(default=720, title="Live camera view height") quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality") -class RecordConfig(BaseModel): - enabled: bool = Field(default=False, title="Enable record on all cameras.") - retain_days: int = Field(default=30, title="Recording retention period in days.") - - class CameraConfig(BaseModel): name: Optional[str] = Field(title="Camera name.") ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.") - height: int = Field(title="Height of the stream for the detect role.") - width: int = Field(title="Width of the stream for the detect role.") - fps: Optional[int] = Field( - title="Number of frames per second to process through Frigate." - ) best_image_timeout: int = Field( default=60, title="How long to wait for the image with the highest confidence score.", @@ -472,9 +443,6 @@ class CameraConfig(BaseModel): zones: Dict[str, ZoneConfig] = Field( default_factory=dict, title="Zone configuration." ) - clips: CameraClipsConfig = Field( - default_factory=CameraClipsConfig, title="Clip configuration." - ) record: RecordConfig = Field( default_factory=RecordConfig, title="Record configuration." ) @@ -492,7 +460,7 @@ class CameraConfig(BaseModel): default_factory=ObjectConfig, title="Object configuration." ) motion: Optional[MotionConfig] = Field(title="Motion detection configuration.") - detect: Optional[DetectConfig] = Field(title="Object detection configuration.") + detect: DetectConfig = Field(title="Object detection configuration.") timestamp_style: TimestampStyleConfig = Field( default_factory=TimestampStyleConfig, title="Timestamp style configuration." 
) @@ -510,11 +478,11 @@ class CameraConfig(BaseModel): @property def frame_shape(self) -> Tuple[int, int]: - return self.height, self.width + return self.detect.height, self.detect.width @property def frame_shape_yuv(self) -> Tuple[int, int]: - return self.height * 3 // 2, self.width + return self.detect.height * 3 // 2, self.detect.width @property def ffmpeg_cmds(self) -> List[Dict[str, List[str]]]: @@ -535,9 +503,17 @@ class CameraConfig(BaseModel): if isinstance(self.ffmpeg.output_args.detect, list) else self.ffmpeg.output_args.detect.split(" ") ) - ffmpeg_output_args = detect_args + ffmpeg_output_args + ["pipe:"] - if self.fps: - ffmpeg_output_args = ["-r", str(self.fps)] + ffmpeg_output_args + ffmpeg_output_args = ( + [ + "-r", + str(self.detect.fps), + "-s", + f"{self.detect.width}x{self.detect.height}", + ] + + detect_args + + ffmpeg_output_args + + ["pipe:"] + ) if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled: rtmp_args = ( self.ffmpeg.output_args.rtmp @@ -547,17 +523,6 @@ class CameraConfig(BaseModel): ffmpeg_output_args = ( rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args ) - if "clips" in ffmpeg_input.roles: - clips_args = ( - self.ffmpeg.output_args.clips - if isinstance(self.ffmpeg.output_args.clips, list) - else self.ffmpeg.output_args.clips.split(" ") - ) - ffmpeg_output_args = ( - clips_args - + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"] - + ffmpeg_output_args - ) if "record" in ffmpeg_input.roles and self.record.enabled: record_args = ( self.ffmpeg.output_args.record @@ -566,7 +531,7 @@ class CameraConfig(BaseModel): ) ffmpeg_output_args = ( record_args - + [f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4"] + + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"] + ffmpeg_output_args ) @@ -609,6 +574,33 @@ class DatabaseConfig(BaseModel): class ModelConfig(BaseModel): width: int = Field(default=320, title="Object detection model input width.") height: int = Field(default=320, title="Object detection model input height.") + labelmap: Dict[int, str] = Field( + default_factory=dict, title="Labelmap customization." + ) + _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr() + _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr() + + @property + def merged_labelmap(self) -> Dict[int, str]: + return self._merged_labelmap + + @property + def colormap(self) -> Dict[int, tuple[int, int, int]]: + return self._colormap + + def __init__(self, **config): + super().__init__(**config) + + self._merged_labelmap = { + **load_labels("/labelmap.txt"), + **config.get("labelmap", {}), + } + + cmap = plt.cm.get_cmap("tab10", len(self._merged_labelmap.keys())) + + self._colormap = {} + for key, val in self._merged_labelmap.items(): + self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3]) class LogLevelEnum(str, Enum): @@ -652,9 +644,6 @@ class FrigateConfig(BaseModel): logger: LoggerConfig = Field( default_factory=LoggerConfig, title="Logging configuration." ) - clips: ClipsConfig = Field( - default_factory=ClipsConfig, title="Global clips configuration." - ) record: RecordConfig = Field( default_factory=RecordConfig, title="Global record configuration." 
) @@ -690,7 +679,6 @@ class FrigateConfig(BaseModel): # Global config to propegate down to camera level global_config = config.dict( include={ - "clips": {"retain"}, "record": ..., "snapshots": ..., "objects": ..., @@ -703,7 +691,9 @@ class FrigateConfig(BaseModel): for name, camera in config.cameras.items(): merged_config = deep_merge(camera.dict(exclude_unset=True), global_config) - camera_config = CameraConfig.parse_obj({"name": name, **merged_config}) + camera_config: CameraConfig = CameraConfig.parse_obj( + {"name": name, **merged_config} + ) # FFMPEG input substitution for input in camera_config.ffmpeg.inputs: @@ -753,30 +743,13 @@ class FrigateConfig(BaseModel): ) # Default detect configuration - max_disappeared = (camera_config.fps or 5) * 5 - if camera_config.detect: - if camera_config.detect.max_disappeared is None: - camera_config.detect.max_disappeared = max_disappeared - else: - camera_config.detect = DetectConfig(max_disappeared=max_disappeared) + max_disappeared = camera_config.detect.fps * 5 + if camera_config.detect.max_disappeared is None: + camera_config.detect.max_disappeared = max_disappeared # Default live configuration - if camera_config.live: - if ( - camera_config.live.height - and camera_config.live.height <= camera_config.height - ): - camera_config.live.width = int( - camera_config.live.height - * (camera_config.width / camera_config.height) - ) - else: - camera_config.live.height = camera_config.height - camera_config.live.width = camera_config.width - else: - camera_config.live = CameraLiveConfig( - height=camera_config.height, width=camera_config.width - ) + if camera_config.live is None: + camera_config.live = CameraLiveConfig() config.cameras[name] = camera_config diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py index 2ffd8c198..62c35eaf5 100644 --- a/frigate/edgetpu.py +++ b/frigate/edgetpu.py @@ -68,9 +68,14 @@ class LocalObjectDetector(ObjectDetector): experimental_delegates=[edge_tpu_delegate], ) except ValueError: - logger.info("No EdgeTPU detected.") + logger.error( + "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors." + ) raise else: + logger.warning( + "CPU detectors are not recommended and should only be used for testing or for trial purposes." 
+ ) self.interpreter = tflite.Interpreter( model_path="/cpu_model.tflite", num_threads=num_threads ) @@ -97,21 +102,22 @@ class LocalObjectDetector(ObjectDetector): def detect_raw(self, tensor_input): self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) self.interpreter.invoke() - boxes = np.squeeze( - self.interpreter.get_tensor(self.tensor_output_details[0]["index"]) - ) - label_codes = np.squeeze( - self.interpreter.get_tensor(self.tensor_output_details[1]["index"]) - ) - scores = np.squeeze( - self.interpreter.get_tensor(self.tensor_output_details[2]["index"]) + + boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0] + class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0] + scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0] + count = int( + self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0] ) detections = np.zeros((20, 6), np.float32) - for i, score in enumerate(scores): + + for i in range(count): + if scores[i] < 0.4 or i == 20: + break detections[i] = [ - label_codes[i], - score, + class_ids[i], + float(scores[i]), boxes[i][0], boxes[i][1], boxes[i][2], @@ -231,7 +237,7 @@ class EdgeTPUProcess: class RemoteObjectDetector: def __init__(self, name, labels, detection_queue, event, model_shape): - self.labels = load_labels(labels) + self.labels = labels self.name = name self.fps = EventsPerSecond() self.detection_queue = detection_queue diff --git a/frigate/events.py b/frigate/events.py index b8ff6cfd7..3293d19bb 100644 --- a/frigate/events.py +++ b/frigate/events.py @@ -1,20 +1,14 @@ import datetime -import json import logging import os import queue -import subprocess as sp import threading import time -from collections import defaultdict from pathlib import Path -import psutil -import shutil - -from frigate.config import FrigateConfig -from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR -from frigate.models import Event +from frigate.config import FrigateConfig, RecordConfig +from frigate.const import CLIPS_DIR +from frigate.models import Event, Recordings from peewee import fn @@ -39,8 +33,16 @@ class EventProcessor(threading.Thread): if event_data["false_positive"]: return False - # if there are required zones and there is no overlap - required_zones = self.config.cameras[camera].clips.required_zones + record_config: RecordConfig = self.config.cameras[camera].record + + # Recording clips is disabled + if not record_config.enabled or ( + record_config.retain_days == 0 and not record_config.events.enabled + ): + return False + + # If there are required zones and there is no overlap + required_zones = record_config.events.required_zones if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set( required_zones ): @@ -49,174 +51,16 @@ class EventProcessor(threading.Thread): ) return False - return True - - def refresh_cache(self): - cached_files = os.listdir(CACHE_DIR) - - files_in_use = [] - for process in psutil.process_iter(): - try: - if process.name() != "ffmpeg": - continue - - flist = process.open_files() - if flist: - for nt in flist: - if nt.path.startswith(CACHE_DIR): - files_in_use.append(nt.path.split("/")[-1]) - except: - continue - - for f in cached_files: - if f in files_in_use or f in self.cached_clips: - continue - - basename = os.path.splitext(f)[0] - camera, date = basename.rsplit("-", maxsplit=1) - start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S") - - ffprobe_cmd = [ - "ffprobe", - "-v", - "error", - 
"-show_entries", - "format=duration", - "-of", - "default=noprint_wrappers=1:nokey=1", - f"{os.path.join(CACHE_DIR, f)}", - ] - p = sp.run(ffprobe_cmd, capture_output=True) - if p.returncode == 0: - duration = float(p.stdout.decode().strip()) - else: - logger.info(f"bad file: {f}") - os.remove(os.path.join(CACHE_DIR, f)) - continue - - self.cached_clips[f] = { - "path": f, - "camera": camera, - "start_time": start_time.timestamp(), - "duration": duration, - } - - if len(self.events_in_process) > 0: - earliest_event = min( - self.events_in_process.values(), key=lambda x: x["start_time"] - )["start_time"] - else: - earliest_event = datetime.datetime.now().timestamp() - - # if the earliest event is more tha max seconds ago, cap it - max_seconds = self.config.clips.max_seconds - earliest_event = max( - earliest_event, - datetime.datetime.now().timestamp() - self.config.clips.max_seconds, - ) - - for f, data in list(self.cached_clips.items()): - if earliest_event - 90 > data["start_time"] + data["duration"]: - del self.cached_clips[f] - logger.debug(f"Cleaning up cached file {f}") - os.remove(os.path.join(CACHE_DIR, f)) - - # if we are still using more than 90% of the cache, proactively cleanup - cache_usage = shutil.disk_usage("/tmp/cache") - while ( - cache_usage.used / cache_usage.total > 0.9 - and cache_usage.free < 200000000 - and len(self.cached_clips) > 0 + # If the required objects are not present + if ( + record_config.events.objects is not None + and event_data["label"] not in record_config.events.objects ): - logger.warning("More than 90% of the cache is used.") - logger.warning( - "Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config." + logger.debug( + f"Not creating clip for {event_data['id']} because it did not contain required objects" ) - logger.warning("Proactively cleaning up the cache...") - oldest_clip = min(self.cached_clips.values(), key=lambda x: x["start_time"]) - del self.cached_clips[oldest_clip["path"]] - os.remove(os.path.join(CACHE_DIR, oldest_clip["path"])) - cache_usage = shutil.disk_usage("/tmp/cache") - - def create_clip(self, camera, event_data, pre_capture, post_capture): - # get all clips from the camera with the event sorted - sorted_clips = sorted( - [c for c in self.cached_clips.values() if c["camera"] == camera], - key=lambda i: i["start_time"], - ) - - # if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds - wait_count = 0 - while ( - len(sorted_clips) == 0 - or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"] - < event_data["end_time"] + post_capture - ): - if wait_count > 4: - logger.warning( - f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event." - ) - return False - logger.debug(f"No cache clips for {camera}. 
Waiting...") - time.sleep(5) - self.refresh_cache() - # get all clips from the camera with the event sorted - sorted_clips = sorted( - [c for c in self.cached_clips.values() if c["camera"] == camera], - key=lambda i: i["start_time"], - ) - wait_count += 1 - - playlist_start = event_data["start_time"] - pre_capture - playlist_end = event_data["end_time"] + post_capture - playlist_lines = [] - for clip in sorted_clips: - # clip ends before playlist start time, skip - if clip["start_time"] + clip["duration"] < playlist_start: - continue - # clip starts after playlist ends, finish - if clip["start_time"] > playlist_end: - break - playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'") - # if this is the starting clip, add an inpoint - if clip["start_time"] < playlist_start: - playlist_lines.append( - f"inpoint {int(playlist_start-clip['start_time'])}" - ) - # if this is the ending clip, add an outpoint - if clip["start_time"] + clip["duration"] > playlist_end: - playlist_lines.append( - f"outpoint {int(playlist_end-clip['start_time'])}" - ) - - clip_name = f"{camera}-{event_data['id']}" - ffmpeg_cmd = [ - "ffmpeg", - "-y", - "-protocol_whitelist", - "pipe,file", - "-f", - "concat", - "-safe", - "0", - "-i", - "-", - "-c", - "copy", - "-movflags", - "+faststart", - f"{os.path.join(CLIPS_DIR, clip_name)}.mp4", - ] - - p = sp.run( - ffmpeg_cmd, - input="\n".join(playlist_lines), - encoding="ascii", - capture_output=True, - ) - if p.returncode != 0: - logger.error(p.stderr) return False + return True def run(self): @@ -224,33 +68,19 @@ class EventProcessor(threading.Thread): try: event_type, camera, event_data = self.event_queue.get(timeout=10) except queue.Empty: - if not self.stop_event.is_set(): - self.refresh_cache() continue logger.debug(f"Event received: {event_type} {camera} {event_data['id']}") - self.refresh_cache() if event_type == "start": self.events_in_process[event_data["id"]] = event_data if event_type == "end": - clips_config = self.config.cameras[camera].clips + record_config: RecordConfig = self.config.cameras[camera].record - clip_created = False - if self.should_create_clip(camera, event_data): - if clips_config.enabled and ( - clips_config.objects is None - or event_data["label"] in clips_config.objects - ): - clip_created = self.create_clip( - camera, - event_data, - clips_config.pre_capture, - clips_config.post_capture, - ) + has_clip = self.should_create_clip(camera, event_data) - if clip_created or event_data["has_snapshot"]: + if has_clip or event_data["has_snapshot"]: Event.create( id=event_data["id"], label=event_data["label"], @@ -261,11 +91,12 @@ class EventProcessor(threading.Thread): false_positive=event_data["false_positive"], zones=list(event_data["entered_zones"]), thumbnail=event_data["thumbnail"], - has_clip=clip_created, + has_clip=has_clip, has_snapshot=event_data["has_snapshot"], ) + del self.events_in_process[event_data["id"]] - self.event_processed_queue.put((event_data["id"], camera, clip_created)) + self.event_processed_queue.put((event_data["id"], camera, has_clip)) logger.info(f"Exiting event processor...") @@ -281,7 +112,7 @@ class EventCleanup(threading.Thread): def expire(self, media_type): ## Expire events from unlisted cameras based on the global config if media_type == "clips": - retain_config = self.config.clips.retain + retain_config = self.config.record.events.retain file_extension = "mp4" update_params = {"has_clip": False} else: @@ -332,7 +163,7 @@ class EventCleanup(threading.Thread): ## Expire events from cameras based on 
the camera config for name, camera in self.config.cameras.items(): if media_type == "clips": - retain_config = camera.clips.retain + retain_config = camera.record.events.retain else: retain_config = camera.snapshots.retain # get distinct objects in database for this camera diff --git a/frigate/http.py b/frigate/http.py index 2f3a15221..cef448beb 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -6,11 +6,13 @@ import glob import logging import os import re +import subprocess as sp import time from functools import reduce from pathlib import Path import cv2 +from flask.helpers import send_file import numpy as np from flask import ( @@ -185,6 +187,7 @@ def event_thumbnail(id): @bp.route("/events/<id>/snapshot.jpg") def event_snapshot(id): + download = request.args.get("download", type=bool) jpg_bytes = None try: event = Event.get(Event.id == id) @@ -220,6 +223,45 @@ response = make_response(jpg_bytes) response.headers["Content-Type"] = "image/jpg" + if download: + response.headers[ + "Content-Disposition" + ] = f"attachment; filename=snapshot-{id}.jpg" + return response + + +@bp.route("/events/<id>/clip.mp4") +def event_clip(id): + download = request.args.get("download", type=bool) + + try: + event: Event = Event.get(Event.id == id) + except DoesNotExist: + return "Event not found.", 404 + + if not event.has_clip: + return "Clip not available", 404 + + event_config = current_app.frigate_config.cameras[event.camera].record.events + start_ts = event.start_time - event_config.pre_capture + end_ts = event.end_time + event_config.post_capture + file_name = f"{event.camera}-{id}.mp4" + clip_path = os.path.join(CLIPS_DIR, file_name) + + if not os.path.isfile(clip_path): + return recording_clip(event.camera, start_ts, end_ts) + + response = make_response() + response.headers["Content-Description"] = "File Transfer" + response.headers["Cache-Control"] = "no-cache" + response.headers["Content-Type"] = "video/mp4" + if download: + response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name + response.headers["Content-Length"] = os.path.getsize(clip_path) + response.headers[ + "X-Accel-Redirect" + ] = f"/clips/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile + return response @@ -277,7 +319,16 @@ def events(): @bp.route("/config") def config(): - return jsonify(current_app.frigate_config.dict()) + config = current_app.frigate_config.dict() + + # add in the ffmpeg_cmds + for camera_name, camera in current_app.frigate_config.cameras.items(): + camera_dict = config["cameras"][camera_name] + camera_dict["ffmpeg_cmds"] = camera.ffmpeg_cmds + for cmd in camera_dict["ffmpeg_cmds"]: + cmd["cmd"] = " ".join(cmd["cmd"]) + + return jsonify(config) @bp.route("/config/schema") @@ -508,19 +559,87 @@ def recordings(camera_name): ) -@bp.route("/vod/<year_month>/<day>/<hour>/<camera>") -def vod(year_month, day, hour, camera): - start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H") - end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1) - start_ts = start_date.timestamp() - end_ts = end_date.timestamp() +@bp.route("/<camera>/start/<int:start_ts>/end/<int:end_ts>/clip.mp4") +@bp.route("/<camera>/start/<float:start_ts>/end/<float:end_ts>/clip.mp4") +def recording_clip(camera, start_ts, end_ts): + download = request.args.get("download", type=bool) - # Select all recordings where either the start or end dates fall in the requested hour recordings = ( Recordings.select() .where( (Recordings.start_time.between(start_ts, end_ts)) | (Recordings.end_time.between(start_ts, end_ts)) | ((start_ts > Recordings.start_time) & (end_ts <
Recordings.end_time)) + ) + .where(Recordings.camera == camera) + .order_by(Recordings.start_time.asc()) + ) + + playlist_lines = [] + clip: Recordings + for clip in recordings: + playlist_lines.append(f"file '{clip.path}'") + # if this is the starting clip, add an inpoint + if clip.start_time < start_ts: + playlist_lines.append(f"inpoint {int(start_ts - clip.start_time)}") + # if this is the ending clip, add an outpoint + if clip.end_time > end_ts: + playlist_lines.append(f"outpoint {int(end_ts - clip.start_time)}") + + file_name = f"clip_{camera}_{start_ts}-{end_ts}.mp4" + path = f"/tmp/cache/{file_name}" + + ffmpeg_cmd = [ + "ffmpeg", + "-y", + "-protocol_whitelist", + "pipe,file", + "-f", + "concat", + "-safe", + "0", + "-i", + "-", + "-c", + "copy", + "-movflags", + "+faststart", + path, + ] + + p = sp.run( + ffmpeg_cmd, + input="\n".join(playlist_lines), + encoding="ascii", + capture_output=True, + ) + if p.returncode != 0: + logger.error(p.stderr) + return f"Could not create clip from recordings for {camera}.", 500 + + response = make_response() + response.headers["Content-Description"] = "File Transfer" + response.headers["Cache-Control"] = "no-cache" + response.headers["Content-Type"] = "video/mp4" + if download: + response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name + response.headers["Content-Length"] = os.path.getsize(path) + response.headers[ + "X-Accel-Redirect" + ] = f"/cache/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile + + return response + + +@bp.route("/vod/<camera>/start/<int:start_ts>/end/<int:end_ts>") +@bp.route("/vod/<camera>/start/<float:start_ts>/end/<float:end_ts>") +def vod_ts(camera, start_ts, end_ts): + recordings = ( + Recordings.select() + .where( + Recordings.start_time.between(start_ts, end_ts) + | Recordings.end_time.between(start_ts, end_ts) + | ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time)) ) .where(Recordings.camera == camera) .order_by(Recordings.start_time.asc()) @@ -544,9 +663,13 @@ clips.append(clip) durations.append(duration) + if not clips: + return "No recordings found.", 404 + + hour_ago = datetime.now() - timedelta(hours=1) return jsonify( { - "cache": datetime.now() - timedelta(hours=1) > start_date, + "cache": hour_ago.timestamp() > start_ts, "discontinuity": False, "durations": durations, "sequences": [{"clips": clips}], } ) +@bp.route("/vod/<year_month>/<day>/<hour>/<camera>") +def vod_hour(year_month, day, hour, camera): + start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H") + end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1) + start_ts = start_date.timestamp() + end_ts = end_date.timestamp() + + return vod_ts(camera, start_ts, end_ts) + + +@bp.route("/vod/event/<id>") +def vod_event(id): + try: + event: Event = Event.get(Event.id == id) + except DoesNotExist: + return "Event not found.", 404 + + if not event.has_clip: + return "Clip not available", 404 + + event_config = current_app.frigate_config.cameras[event.camera].record.events + start_ts = event.start_time - event_config.pre_capture + end_ts = event.end_time + event_config.post_capture + clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4") + + if not os.path.isfile(clip_path): + return vod_ts(event.camera, start_ts, end_ts) + + duration = int((end_ts - start_ts) * 1000) + return jsonify( + { + "cache": True, + "discontinuity": False, + "durations": [duration], + "sequences": [{"clips": [{"type": "source", "path": clip_path}]}], + } + ) + + def
imagestream(detected_frames_processor, camera_name, fps, height, draw_options): while True: # max out at specified FPS diff --git a/frigate/mqtt.py b/frigate/mqtt.py index bc10e95c8..78b6590a9 100644 --- a/frigate/mqtt.py +++ b/frigate/mqtt.py @@ -13,6 +13,7 @@ from ws4py.server.wsgiutils import WebSocketWSGIApplication from ws4py.websocket import WebSocket from frigate.config import FrigateConfig +from frigate.util import restart_frigate logger = logging.getLogger(__name__) @@ -20,22 +21,22 @@ logger = logging.getLogger(__name__) def create_mqtt_client(config: FrigateConfig, camera_metrics): mqtt_config = config.mqtt - def on_clips_command(client, userdata, message): + def on_recordings_command(client, userdata, message): payload = message.payload.decode() - logger.debug(f"on_clips_toggle: {message.topic} {payload}") + logger.debug(f"on_recordings_toggle: {message.topic} {payload}") camera_name = message.topic.split("/")[-3] - clips_settings = config.cameras[camera_name].clips + record_settings = config.cameras[camera_name].record if payload == "ON": - if not clips_settings.enabled: - logger.info(f"Turning on clips for {camera_name} via mqtt") - clips_settings.enabled = True + if not record_settings.enabled: + logger.info(f"Turning on recordings for {camera_name} via mqtt") + record_settings.enabled = True elif payload == "OFF": - if clips_settings.enabled: - logger.info(f"Turning off clips for {camera_name} via mqtt") - clips_settings.enabled = False + if record_settings.enabled: + logger.info(f"Turning off recordings for {camera_name} via mqtt") + record_settings.enabled = False else: logger.warning(f"Received unsupported value at {message.topic}: {payload}") @@ -88,6 +89,9 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics): state_topic = f"{message.topic[:-4]}/state" client.publish(state_topic, payload, retain=True) + def on_restart_command(client, userdata, message): + restart_frigate() + def on_connect(client, userdata, flags, rc): threading.current_thread().name = "mqtt" if rc != 0: @@ -116,7 +120,7 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics): # register callbacks for name in config.cameras.keys(): client.message_callback_add( - f"{mqtt_config.topic_prefix}/{name}/clips/set", on_clips_command + f"{mqtt_config.topic_prefix}/{name}/recordings/set", on_recordings_command ) client.message_callback_add( f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command @@ -125,6 +129,10 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics): f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command ) + client.message_callback_add( + f"{mqtt_config.topic_prefix}/restart", on_restart_command + ) + if not mqtt_config.tls_ca_certs is None: if ( not mqtt_config.tls_client_cert is None @@ -151,8 +159,8 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics): for name in config.cameras.keys(): client.publish( - f"{mqtt_config.topic_prefix}/{name}/clips/state", - "ON" if config.cameras[name].clips.enabled else "OFF", + f"{mqtt_config.topic_prefix}/{name}/recordings/state", + "ON" if config.cameras[name].record.enabled else "OFF", retain=True, ) client.publish( @@ -184,7 +192,7 @@ class MqttSocketRelay: json_message = json.loads(message.data.decode("utf-8")) json_message = { "topic": f"{self.topic_prefix}/{json_message['topic']}", - "payload": json_message["payload"], + "payload": json_message.get("payload"), "retain": json_message.get("retain", False), } except Exception as e: diff --git a/frigate/object_processing.py 
b/frigate/object_processing.py index 706d04328..4ebd20870 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -1,5 +1,5 @@ -import copy import base64 +import copy import datetime import hashlib import itertools @@ -14,30 +14,20 @@ from statistics import mean, median from typing import Callable, Dict import cv2 -import matplotlib.pyplot as plt import numpy as np -from frigate.config import FrigateConfig, CameraConfig -from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR +from frigate.config import CameraConfig, FrigateConfig +from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR from frigate.edgetpu import load_labels from frigate.util import ( SharedMemoryFrameManager, + calculate_region, draw_box_with_label, draw_timestamp, - calculate_region, ) logger = logging.getLogger(__name__) -PATH_TO_LABELS = "/labelmap.txt" - -LABELS = load_labels(PATH_TO_LABELS) -cmap = plt.cm.get_cmap("tab10", len(LABELS.keys())) - -COLOR_MAP = {} -for key, val in LABELS.items(): - COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3]) - def on_edge(box, frame_shape): if ( @@ -72,9 +62,12 @@ def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool: class TrackedObject: - def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data): + def __init__( + self, camera, colormap, camera_config: CameraConfig, frame_cache, obj_data + ): self.obj_data = obj_data self.camera = camera + self.colormap = colormap self.camera_config = camera_config self.frame_cache = frame_cache self.current_zones = [] @@ -107,6 +100,7 @@ class TrackedObject: def update(self, current_frame_time, obj_data): significant_update = False + zone_change = False self.obj_data.update(obj_data) # if the object is not in the current frame, add a 0.0 to the score history if self.obj_data["frame_time"] != current_frame_time: @@ -142,6 +136,9 @@ class TrackedObject: bottom_center = (self.obj_data["centroid"][0], self.obj_data["box"][3]) # check each zone for name, zone in self.camera_config.zones.items(): + # if the zone is not for this object type, skip + if len(zone.objects) > 0 and not self.obj_data["label"] in zone.objects: + continue contour = zone.contour # check if the object is in the zone if cv2.pointPolygonTest(contour, bottom_center, False) >= 0: @@ -152,10 +149,10 @@ class TrackedObject: # if the zones changed, signal an update if not self.false_positive and set(self.current_zones) != set(current_zones): - significant_update = True + zone_change = True self.current_zones = current_zones - return significant_update + return (significant_update, zone_change) def to_dict(self, include_thumbnail: bool = False): snapshot_time = ( @@ -243,7 +240,7 @@ class TrackedObject: if bounding_box: thickness = 2 - color = COLOR_MAP[self.obj_data["label"]] + color = self.colormap[self.obj_data["label"]] # draw the bounding boxes on the frame box = self.thumbnail_data["box"] @@ -318,7 +315,9 @@ def zone_filtered(obj: TrackedObject, object_config): # Maintains the state of a camera class CameraState: - def __init__(self, name, config, frame_manager): + def __init__( + self, name, config: FrigateConfig, frame_manager: SharedMemoryFrameManager + ): self.name = name self.config = config self.camera_config = config.cameras[name] @@ -351,7 +350,7 @@ class CameraState: for obj in tracked_objects.values(): if obj["frame_time"] == frame_time: thickness = 2 - color = COLOR_MAP[obj["label"]] + color = self.config.model.colormap[obj["label"]] else: thickness = 1 color = (255, 0, 0) @@ -392,7 
+391,7 @@ class CameraState: cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness) if draw_options.get("mask"): - mask_overlay = np.where(self.camera_config.motion_mask == [0]) + mask_overlay = np.where(self.camera_config.motion.mask == [0]) frame_copy[mask_overlay] = [0, 0, 0] if draw_options.get("motion_boxes"): @@ -442,7 +441,11 @@ class CameraState: for id in new_ids: new_obj = tracked_objects[id] = TrackedObject( - self.name, self.camera_config, self.frame_cache, current_detections[id] + self.name, + self.config.model.colormap, + self.camera_config, + self.frame_cache, + current_detections[id], ) # call event handlers @@ -451,7 +454,9 @@ class CameraState: for id in updated_ids: updated_obj = tracked_objects[id] - significant_update = updated_obj.update(frame_time, current_detections[id]) + significant_update, zone_change = updated_obj.update( + frame_time, current_detections[id] + ) if significant_update: # ensure this frame is stored in the cache @@ -464,11 +469,12 @@ class CameraState: updated_obj.last_updated = frame_time # if it has been more than 5 seconds since the last publish - # and the last update is greater than the last publish + # and the last update is greater than the last publish or + # the object has changed zones if ( frame_time - updated_obj.last_published > 5 and updated_obj.last_updated > updated_obj.last_published - ): + ) or zone_change: # call event handlers for c in self.callbacks["update"]: c(self.name, updated_obj, frame_time) diff --git a/frigate/output.py b/frigate/output.py index 9b265c530..3ae840b59 100644 --- a/frigate/output.py +++ b/frigate/output.py @@ -159,9 +159,16 @@ class BirdsEyeFrameManager: frame = None channel_dims = None else: - frame = self.frame_manager.get( - f"{camera}{frame_time}", self.config.cameras[camera].frame_shape_yuv - ) + try: + frame = self.frame_manager.get( + f"{camera}{frame_time}", self.config.cameras[camera].frame_shape_yuv + ) + except FileNotFoundError: + # TODO: better frame management would prevent this edge case + logger.warning( + f"Unable to copy frame {camera}{frame_time} to birdseye." 
+ ) + return channel_dims = self.cameras[camera]["channel_dims"] copy_yuv_to_position( @@ -346,10 +353,14 @@ def output_frames(config: FrigateConfig, video_output_queue): broadcasters = {} for camera, cam_config in config.cameras.items(): + width = int( + cam_config.live.height + * (cam_config.frame_shape[1] / cam_config.frame_shape[0]) + ) converters[camera] = FFMpegConverter( cam_config.frame_shape[1], cam_config.frame_shape[0], - cam_config.live.width, + width, cam_config.live.height, cam_config.live.quality, ) diff --git a/frigate/process_clip.py b/frigate/process_clip.py index b6462121c..ee9240338 100644 --- a/frigate/process_clip.py +++ b/frigate/process_clip.py @@ -14,7 +14,7 @@ import numpy as np from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig from frigate.edgetpu import LocalObjectDetector from frigate.motion import MotionDetector -from frigate.object_processing import COLOR_MAP, CameraState +from frigate.object_processing import CameraState from frigate.objects import ObjectTracker from frigate.util import ( DictFrameManager, diff --git a/frigate/record.py b/frigate/record.py index fcf493ce5..c5150103d 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -3,6 +3,7 @@ import itertools import logging import os import random +import shutil import string import subprocess as sp import threading @@ -10,9 +11,11 @@ from pathlib import Path import psutil +from peewee import JOIN + from frigate.config import FrigateConfig -from frigate.const import RECORD_DIR -from frigate.models import Recordings +from frigate.const import CACHE_DIR, RECORD_DIR +from frigate.models import Event, Recordings logger = logging.getLogger(__name__) @@ -45,8 +48,10 @@ class RecordingMaintainer(threading.Thread): def move_files(self): recordings = [ d - for d in os.listdir(RECORD_DIR) - if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4") + for d in os.listdir(CACHE_DIR) + if os.path.isfile(os.path.join(CACHE_DIR, d)) + and d.endswith(".mp4") + and not d.startswith("clip_") ] files_in_use = [] @@ -57,19 +62,26 @@ class RecordingMaintainer(threading.Thread): flist = process.open_files() if flist: for nt in flist: - if nt.path.startswith(RECORD_DIR): + if nt.path.startswith(CACHE_DIR): files_in_use.append(nt.path.split("/")[-1]) except: continue for f in recordings: + # Skip files currently in use if f in files_in_use: continue + cache_path = os.path.join(CACHE_DIR, f) basename = os.path.splitext(f)[0] camera, date = basename.rsplit("-", maxsplit=1) start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S") + # Just delete files if recordings are turned off + if not self.config.cameras[camera].record.enabled: + Path(cache_path).unlink(missing_ok=True) + continue + ffprobe_cmd = [ "ffprobe", "-v", @@ -78,7 +90,7 @@ class RecordingMaintainer(threading.Thread): "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", - f"{os.path.join(RECORD_DIR, f)}", + f"{cache_path}", ] p = sp.run(ffprobe_cmd, capture_output=True) if p.returncode == 0: @@ -86,7 +98,7 @@ class RecordingMaintainer(threading.Thread): end_time = start_time + datetime.timedelta(seconds=duration) else: logger.info(f"bad file: {f}") - os.remove(os.path.join(RECORD_DIR, f)) + Path(cache_path).unlink(missing_ok=True) continue directory = os.path.join( @@ -99,7 +111,9 @@ class RecordingMaintainer(threading.Thread): file_name = f"{start_time.strftime('%M.%S.mp4')}" file_path = os.path.join(directory, file_name) - os.rename(os.path.join(RECORD_DIR, f), file_path) + # copy then delete is required when recordings 
are stored on some network drives + shutil.copyfile(cache_path, file_path) + os.remove(cache_path) rand_id = "".join( random.choices(string.ascii_lowercase + string.digits, k=6) ) @@ -113,30 +127,166 @@ duration=duration, ) + def run(self): + # Check for new files every 5 seconds + while not self.stop_event.wait(5): + self.move_files() + + logger.info(f"Exiting recording maintenance...") + + +class RecordingCleanup(threading.Thread): + def __init__(self, config: FrigateConfig, stop_event): + threading.Thread.__init__(self) + self.name = "recording_cleanup" + self.config = config + self.stop_event = stop_event + + def clean_tmp_clips(self): + # delete any tmp clips more than a minute old + for p in Path("/tmp/cache").rglob("clip_*.mp4"): + logger.debug(f"Checking tmp clip {p}.") + if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1): + logger.debug("Deleting tmp clip.") + p.unlink(missing_ok=True) + + def expire_recordings(self): + logger.debug("Start expire recordings (new).") + + logger.debug("Start deleted cameras.") + # Handle deleted cameras + no_camera_recordings: Recordings = Recordings.select().where( + Recordings.camera.not_in(list(self.config.cameras.keys())), + ) + + for recording in no_camera_recordings: + expire_days = self.config.record.retain_days + expire_before = ( + datetime.datetime.now() - datetime.timedelta(days=expire_days) + ).timestamp() + if recording.end_time < expire_before: + Path(recording.path).unlink(missing_ok=True) + Recordings.delete_by_id(recording.id) + logger.debug("End deleted cameras.") + + logger.debug("Start all cameras.") + for camera, config in self.config.cameras.items(): + logger.debug(f"Start camera: {camera}.") + # When deleting recordings without events, we have to keep at LEAST the configured max clip duration + min_end = ( + datetime.datetime.now() + - datetime.timedelta(seconds=config.record.events.max_seconds) + ).timestamp() + expire_days = config.record.retain_days + expire_before = ( + datetime.datetime.now() - datetime.timedelta(days=expire_days) + ).timestamp() + expire_date = min(min_end, expire_before) + + # Get recordings to remove + recordings: Recordings = Recordings.select().where( + Recordings.camera == camera, + Recordings.end_time < expire_date, + ) + + for recording in recordings: + # See if there are any associated events + events: Event = Event.select().where( + Event.camera == recording.camera, + ( + Event.start_time.between( + recording.start_time, recording.end_time + ) + | Event.end_time.between( + recording.start_time, recording.end_time + ) + | ( + (recording.start_time > Event.start_time) + & (recording.end_time < Event.end_time) + ) + ), + ) + keep = False + event_ids = set() + + event: Event + for event in events: + event_ids.add(event.id) + # Check event/label retention and keep the recording if within window + expire_days_event = ( + 0 + if not config.record.events.enabled + else config.record.events.retain.objects.get( + event.label, config.record.events.retain.default + ) + ) + expire_before_event = ( + datetime.datetime.now() + - datetime.timedelta(days=expire_days_event) + ).timestamp() + if recording.end_time >= expire_before_event: + keep = True + + # Delete recordings outside of the retention window + if not keep: + Path(recording.path).unlink(missing_ok=True) + Recordings.delete_by_id(recording.id) + if event_ids: + # Update associated events + Event.update(has_clip=False).where( + Event.id.in_(list(event_ids)) + ).execute() + + logger.debug(f"End
camera: {camera}.") + + logger.debug("End all cameras.") + logger.debug("End expire recordings (new).") + def expire_files(self): + logger.debug("Start expire files (legacy).") + + shortest_retention = self.config.record.retain_days + default_expire = ( + datetime.datetime.now().timestamp() + - SECONDS_IN_DAY * self.config.record.retain_days + ) delete_before = {} for name, camera in self.config.cameras.items(): delete_before[name] = ( datetime.datetime.now().timestamp() - SECONDS_IN_DAY * camera.record.retain_days ) + if camera.record.retain_days < shortest_retention: + shortest_retention = camera.record.retain_days - for p in Path("/media/frigate/recordings").rglob("*.mp4"): - if not p.parent.name in delete_before: + logger.debug(f"Shortest retention: {shortest_retention}") + process = sp.run( + ["find", RECORD_DIR, "-type", "f", "-mtime", f"+{shortest_retention}"], + capture_output=True, + text=True, + ) + files_to_check = process.stdout.splitlines() + + for f in files_to_check: + p = Path(f) + # Ignore files that have a record in the recordings DB + if Recordings.select().where(Recordings.path == str(p)).count(): continue - if p.stat().st_mtime < delete_before[p.parent.name]: - Recordings.delete().where(Recordings.path == str(p)).execute() + if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire): p.unlink(missing_ok=True) + logger.debug("End expire files (legacy).") + def run(self): + # Expire recordings every minute, clean directories every hour. for counter in itertools.cycle(range(60)): - if self.stop_event.wait(10): - logger.info(f"Exiting recording maintenance...") + if self.stop_event.wait(60): + logger.info(f"Exiting recording cleanup...") break - # only expire events every 10 minutes, but check for new files every 10 seconds + self.expire_recordings() + self.clean_tmp_clips() + if counter == 0: self.expire_files() remove_empty_directories(RECORD_DIR) - - self.move_files() diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 6a91a2814..20fda3a05 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -18,8 +18,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -42,8 +45,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -60,8 +66,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -82,8 +91,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "objects": {"track": ["cat"]}, } }, @@ -105,8 +117,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -130,8 +145,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -152,8 +170,11 @@ class 
TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "objects": { "track": ["person", "dog"], "filters": {"dog": {"threshold": 0.7}}, @@ -179,8 +200,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "objects": { "mask": "0,0,1,1,0,1", "filters": {"dog": {"mask": "1,1,1,1,1,1"}}, @@ -197,6 +221,34 @@ class TestConfig(unittest.TestCase): assert len(back_camera.objects.filters["dog"].raw_mask) == 2 assert len(back_camera.objects.filters["person"].raw_mask) == 1 + def test_default_input_args(self): + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert "-rtsp_transport" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"] + def test_ffmpeg_params_global(self): config = { "ffmpeg": {"input_args": "-re"}, @@ -208,8 +260,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "objects": { "track": ["person", "dog"], "filters": {"dog": {"threshold": 0.7}}, @@ -235,8 +290,11 @@ class TestConfig(unittest.TestCase): ], "input_args": ["-re"], }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "objects": { "track": ["person", "dog"], "filters": {"dog": {"threshold": 0.7}}, @@ -267,8 +325,11 @@ class TestConfig(unittest.TestCase): ], "input_args": "test3", }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "objects": { "track": ["person", "dog"], "filters": {"dog": {"threshold": 0.7}}, @@ -288,7 +349,9 @@ class TestConfig(unittest.TestCase): def test_inherit_clips_retention(self): config = { "mqtt": {"host": "mqtt"}, - "clips": {"retain": {"default": 20, "objects": {"person": 30}}}, + "record": { + "events": {"retain": {"default": 20, "objects": {"person": 30}}} + }, "cameras": { "back": { "ffmpeg": { @@ -296,8 +359,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -305,12 +371,16 @@ class TestConfig(unittest.TestCase): assert config == frigate_config.dict(exclude_unset=True) runtime_config = frigate_config.runtime_config - assert runtime_config.cameras["back"].clips.retain.objects["person"] == 30 + assert ( + runtime_config.cameras["back"].record.events.retain.objects["person"] == 30 + ) def test_roles_listed_twice_throws_error(self): config = { "mqtt": {"host": "mqtt"}, - "clips": {"retain": {"default": 20, "objects": {"person": 30}}}, + "record": { + "events": {"retain": {"default": 20, "objects": {"person": 30}}} + }, "cameras": { "back": { "ffmpeg": { @@ -319,8 +389,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video2", "roles": ["detect"]}, ] }, - "height": 1080, - "width": 1920, + "detect": { + 
"height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -329,7 +402,9 @@ class TestConfig(unittest.TestCase): def test_zone_matching_camera_name_throws_error(self): config = { "mqtt": {"host": "mqtt"}, - "clips": {"retain": {"default": 20, "objects": {"person": 30}}}, + "record": { + "events": {"retain": {"default": 20, "objects": {"person": 30}}} + }, "cameras": { "back": { "ffmpeg": { @@ -337,8 +412,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "zones": {"back": {"coordinates": "1,1,1,1,1,1"}}, } }, @@ -348,7 +426,9 @@ class TestConfig(unittest.TestCase): def test_zone_assigns_color_and_contour(self): config = { "mqtt": {"host": "mqtt"}, - "clips": {"retain": {"default": 20, "objects": {"person": 30}}}, + "record": { + "events": {"retain": {"default": 20, "objects": {"person": 30}}} + }, "cameras": { "back": { "ffmpeg": { @@ -356,8 +436,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, "zones": {"test": {"coordinates": "1,1,1,1,1,1"}}, } }, @@ -374,7 +457,9 @@ class TestConfig(unittest.TestCase): def test_clips_should_default_to_global_objects(self): config = { "mqtt": {"host": "mqtt"}, - "clips": {"retain": {"default": 20, "objects": {"person": 30}}}, + "record": { + "events": {"retain": {"default": 20, "objects": {"person": 30}}} + }, "objects": {"track": ["person", "dog"]}, "cameras": { "back": { @@ -383,9 +468,12 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]} ] }, - "height": 1080, - "width": 1920, - "clips": {"enabled": True}, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + "record": {"events": {"enabled": True}}, } }, } @@ -394,8 +482,8 @@ class TestConfig(unittest.TestCase): runtime_config = frigate_config.runtime_config back_camera = runtime_config.cameras["back"] - assert back_camera.clips.objects is None - assert back_camera.clips.retain.objects["person"] == 30 + assert back_camera.record.events.objects is None + assert back_camera.record.events.retain.objects["person"] == 30 def test_role_assigned_but_not_enabled(self): config = { @@ -411,8 +499,11 @@ class TestConfig(unittest.TestCase): {"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]}, ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -438,9 +529,12 @@ class TestConfig(unittest.TestCase): }, ] }, - "height": 1080, - "width": 1920, - "detect": {"enabled": True}, + "detect": { + "enabled": True, + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -465,8 +559,11 @@ class TestConfig(unittest.TestCase): }, ] }, - "height": 480, - "width": 640, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -491,8 +588,11 @@ class TestConfig(unittest.TestCase): }, ] }, - "height": 1080, - "width": 1920, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, } }, } @@ -501,7 +601,96 @@ class TestConfig(unittest.TestCase): assert config == frigate_config.dict(exclude_unset=True) runtime_config = frigate_config.runtime_config - assert round(runtime_config.cameras["back"].motion.contour_area) == 225 + assert round(runtime_config.cameras["back"].motion.contour_area) == 99 + + def test_merge_labelmap(self): + + config = { + "mqtt": {"host": 
"mqtt"}, + "model": {"labelmap": {7: "truck"}}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.model.merged_labelmap[7] == "truck" + + def test_default_labelmap_empty(self): + + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.model.merged_labelmap[0] == "person" + + def test_default_labelmap(self): + + config = { + "mqtt": {"host": "mqtt"}, + "model": {"width": 320, "height": 320}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.model.merged_labelmap[0] == "person" if __name__ == "__main__": diff --git a/frigate/util.py b/frigate/util.py index 0ae2848e9..dae3845f2 100755 --- a/frigate/util.py +++ b/frigate/util.py @@ -17,6 +17,7 @@ from typing import AnyStr import cv2 import matplotlib.pyplot as plt import numpy as np +import os logger = logging.getLogger(__name__) @@ -518,6 +519,10 @@ def clipped(obj, frame_shape): return False +def restart_frigate(): + os.kill(os.getpid(), signal.SIGTERM) + + class EventsPerSecond: def __init__(self, max_events=1000): self._start = None diff --git a/frigate/video.py b/frigate/video.py index 23897dabd..3108db4a3 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -216,6 +216,13 @@ class CameraWatchdog(threading.Thread): now = datetime.datetime.now().timestamp() if not self.capture_thread.is_alive(): + self.logger.error( + f"FFMPEG process crashed unexpectedly for {self.camera_name}." + ) + self.logger.error( + "The following ffmpeg logs include the last 100 lines prior to exit." + ) + self.logger.error("You may have invalid args defined for this camera.") self.logpipe.dump() self.start_ffmpeg_detect() elif now - self.capture_thread.current_frame.value > 20: @@ -318,6 +325,7 @@ def track_camera( name, config: CameraConfig, model_shape, + labelmap, detection_queue, result_connection, detected_objects_queue, @@ -344,7 +352,7 @@ def track_camera( motion_detector = MotionDetector(frame_shape, config.motion) object_detector = RemoteObjectDetector( - name, "/labelmap.txt", detection_queue, result_connection, model_shape + name, labelmap, detection_queue, result_connection, model_shape ) object_tracker = ObjectTracker(config.detect) diff --git a/web/src/App.jsx b/web/src/App.jsx index 1212f293f..4ddedece5 100644 --- a/web/src/App.jsx +++ b/web/src/App.jsx @@ -23,7 +23,7 @@ export default function App() { ) : (
-        [markup lost in extraction]
+        [markup lost in extraction]
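The restart path in this change is worth spelling out: `useRestart()` (added to `web/src/api/mqtt.jsx` below) publishes on the `restart` topic, and `restart_frigate()` in `frigate/util.py` above answers by sending SIGTERM to its own process, so the normal shutdown handlers run and the container supervisor brings Frigate back up. A minimal sketch of the backend half of that loop, assuming a paho-mqtt client and an ad-hoc topic name; the dispatcher that actually wires the topic to the handler is not part of this diff:

```python
import os
import signal

import paho.mqtt.client as mqtt  # assumed client library; the real dispatcher is not shown in this diff


def restart_frigate():
    # Same approach as frigate/util.py above: SIGTERM our own PID so the
    # normal shutdown path runs and the supervisor restarts the process.
    os.kill(os.getpid(), signal.SIGTERM)


def on_message(client, userdata, message):
    # Hypothetical routing: treat any payload on the restart topic as a request.
    if message.topic == "frigate/restart":
        restart_frigate()


client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt", 1883)  # assumed broker host/port
client.subscribe("frigate/restart")
client.loop_forever()
```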
diff --git a/web/src/AppBar.jsx b/web/src/AppBar.jsx
index 6a3ff2a72..362376874 100644
--- a/web/src/AppBar.jsx
+++ b/web/src/AppBar.jsx
@@ -5,12 +5,18 @@ import Menu, { MenuItem, MenuSeparator } from './components/Menu';
 import AutoAwesomeIcon from './icons/AutoAwesome';
 import LightModeIcon from './icons/LightMode';
 import DarkModeIcon from './icons/DarkMode';
+import FrigateRestartIcon from './icons/FrigateRestart';
+import Dialog from './components/Dialog';
 import { useDarkMode } from './context';
 import { useCallback, useRef, useState } from 'preact/hooks';
+import { useRestart } from './api/mqtt';

 export default function AppBar() {
   const [showMoreMenu, setShowMoreMenu] = useState(false);
+  const [showDialog, setShowDialog] = useState(false);
+  const [showDialogWait, setShowDialogWait] = useState(false);
   const { setDarkMode } = useDarkMode();
+  const { send: sendRestart } = useRestart();

   const handleSelectDarkMode = useCallback(
     (value, label) => {
@@ -30,6 +36,21 @@ export default function AppBar() {
     setShowMoreMenu(false);
   }, [setShowMoreMenu]);

+  const handleClickRestartDialog = useCallback(() => {
+    setShowDialog(false);
+    setShowDialogWait(true);
+    sendRestart();
+  }, [setShowDialog]); // eslint-disable-line react-hooks/exhaustive-deps
+
+  const handleDismissRestartDialog = useCallback(() => {
+    setShowDialog(false);
+  }, [setShowDialog]);
+
+  const handleRestart = useCallback(() => {
+    setShowMoreMenu(false);
+    setShowDialog(true);
+  }, [setShowDialog]);
+
   return (
@@ -39,8 +60,27 @@ export default function AppBar() {
+          [markup lost in extraction]
+        ) : null}
+      {showDialog ? (
+          [markup lost in extraction]
+      ) : null}
+      {showDialogWait ? (
+          [markup lost in extraction]
+      ) : null}
   );
 }
diff --git a/web/src/api/__tests__/mqtt.test.jsx b/web/src/api/__tests__/mqtt.test.jsx
index 8e1705b7c..31b539522 100644
--- a/web/src/api/__tests__/mqtt.test.jsx
+++ b/web/src/api/__tests__/mqtt.test.jsx
@@ -107,12 +107,12 @@ describe('MqttProvider', () => {
     );
   });

-  test('prefills the clips/detect/snapshots state from config', async () => {
+  test('prefills the recordings/detect/snapshots state from config', async () => {
     jest.spyOn(Date, 'now').mockReturnValue(123456);
     const config = {
       cameras: {
-        front: { name: 'front', detect: { enabled: true }, clips: { enabled: false }, snapshots: { enabled: true } },
-        side: { name: 'side', detect: { enabled: false }, clips: { enabled: false }, snapshots: { enabled: false } },
+        front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true } },
+        side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false } },
       },
     };
     render(
       [markup lost in extraction]
     );
     await screen.findByTestId('data');
     expect(screen.getByTestId('front/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
-    expect(screen.getByTestId('front/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+    expect(screen.getByTestId('front/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
     expect(screen.getByTestId('front/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
     expect(screen.getByTestId('side/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
-    expect(screen.getByTestId('side/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+    expect(screen.getByTestId('side/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
     expect(screen.getByTestId('side/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
   });
 });
diff --git a/web/src/api/mqtt.jsx b/web/src/api/mqtt.jsx
index 6268b3d62..5d7639450 100644
--- a/web/src/api/mqtt.jsx
+++ b/web/src/api/mqtt.jsx
@@ -41,8 +41,8 @@ export function MqttProvider({
   useEffect(() => {
     Object.keys(config.cameras).forEach((camera) => {
-      const { name, clips, detect, snapshots } = config.cameras[camera];
-      dispatch({ topic: `${name}/clips/state`, payload: clips.enabled ? 'ON' : 'OFF' });
+      const { name, record, detect, snapshots } = config.cameras[camera];
+      dispatch({ topic: `${name}/recordings/state`, payload: record.enabled ? 'ON' : 'OFF' });
       dispatch({ topic: `${name}/detect/state`, payload: detect.enabled ? 'ON' : 'OFF' });
       dispatch({ topic: `${name}/snapshots/state`, payload: snapshots.enabled ? 'ON' : 'OFF' });
     });
@@ -101,12 +101,12 @@ export function useDetectState(camera) {
   return { payload, send, connected };
 }

-export function useClipsState(camera) {
+export function useRecordingsState(camera) {
   const {
     value: { payload },
     send,
     connected,
-  } = useMqtt(`${camera}/clips/state`, `${camera}/clips/set`);
+  } = useMqtt(`${camera}/recordings/state`, `${camera}/recordings/set`);
   return { payload, send, connected };
 }

@@ -118,3 +118,12 @@ export function useSnapshotsState(camera) {
   } = useMqtt(`${camera}/snapshots/state`, `${camera}/snapshots/set`);
   return { payload, send, connected };
 }
+
+export function useRestart() {
+  const {
+    value: { payload },
+    send,
+    connected,
+  } = useMqtt('restart', 'restart');
+  return { payload, send, connected };
+}
diff --git a/web/src/components/AppBar.jsx b/web/src/components/AppBar.jsx
index c35a4ad4a..567fe291d 100644
--- a/web/src/components/AppBar.jsx
+++ b/web/src/components/AppBar.jsx
@@ -37,13 +37,13 @@ export default function AppBar({ title: Title, overflowRef, onOverflowClick }) {
   return (
-        [markup lost in extraction]
@@ -54,7 +54,7 @@ export default function AppBar({ title: Title, overflowRef, onOverflowClick }) {
[diff lines lost in extraction]
+        );
+      })}
+
+  );
+}
diff --git a/web/src/components/CameraImage.jsx b/web/src/components/CameraImage.jsx
index 52f537834..a0896e2c8 100644
--- a/web/src/components/CameraImage.jsx
+++ b/web/src/components/CameraImage.jsx
@@ -12,7 +12,8 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
   const canvasRef = useRef(null);
   const [{ width: availableWidth }] = useResizeObserver(containerRef);

-  const { name, width, height } = config.cameras[camera];
+  const { name } = config.cameras[camera];
+  const { width, height } = config.cameras[camera].detect;
   const aspectRatio = width / height;

   const scaledHeight = useMemo(() => {
diff --git a/web/src/components/JSMpegPlayer.jsx b/web/src/components/JSMpegPlayer.jsx
index 9e45b598d..a021c4953 100644
--- a/web/src/components/JSMpegPlayer.jsx
+++ b/web/src/components/JSMpegPlayer.jsx
@@ -12,7 +12,7 @@ export default function JSMpegPlayer({ camera }) {
     playerRef.current,
     url,
     {},
-    {protocols: [], audio: false}
+    {protocols: [], audio: false, videoBufferSize: 1024*1024*4}
   );

   const fullscreen = () => {
diff --git a/web/src/components/NavigationDrawer.jsx b/web/src/components/NavigationDrawer.jsx
index 417e7fb77..cc65d3d38 100644
--- a/web/src/components/NavigationDrawer.jsx
+++ b/web/src/components/NavigationDrawer.jsx
@@ -22,7 +22,7 @@ export default function NavigationDrawer({ children, header }) {
       onClick={handleDismiss}
     >
       {header ? (
-        [markup lost in extraction]
+        [markup lost in extraction]
{header}
       ) : null}
diff --git a/web/src/components/__tests__/CameraImage.test.jsx b/web/src/components/__tests__/CameraImage.test.jsx
index 7cdcd7ef3..2546ca4db 100644
--- a/web/src/components/__tests__/CameraImage.test.jsx
+++ b/web/src/components/__tests__/CameraImage.test.jsx
@@ -7,7 +7,7 @@ import { render, screen } from '@testing-library/preact';
 describe('CameraImage', () => {
   beforeEach(() => {
     jest.spyOn(Api, 'useConfig').mockImplementation(() => {
-      return { data: { cameras: { front: { name: 'front', width: 1280, height: 720 } } } };
+      return { data: { cameras: { front: { name: 'front', detect: { width: 1280, height: 720 } } } } };
     });
     jest.spyOn(Api, 'useApiHost').mockReturnValue('http://base-url.local:5000');
     jest.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 0 }]);
diff --git a/web/src/icons/FrigateRestart.jsx b/web/src/icons/FrigateRestart.jsx
new file mode 100644
index 000000000..9240678c7
--- /dev/null
+++ b/web/src/icons/FrigateRestart.jsx
@@ -0,0 +1,13 @@
+import { h } from 'preact';
+import { memo } from 'preact/compat';
+
+export function FrigateRestart({ className = '' }) {
+  return (
+    [SVG markup lost in extraction]
+  );
+}
+
+export default memo(FrigateRestart);
diff --git a/web/src/routes/Camera.jsx b/web/src/routes/Camera.jsx
index 7f5527df4..b134fd589 100644
--- a/web/src/routes/Camera.jsx
+++ b/web/src/routes/Camera.jsx
@@ -7,6 +7,7 @@ import Heading from '../components/Heading';
 import Link from '../components/Link';
 import SettingsIcon from '../icons/Settings';
 import Switch from '../components/Switch';
+import ButtonsTabbed from '../components/ButtonsTabbed';
 import { usePersistence } from '../context';
 import { useCallback, useMemo, useState } from 'preact/hooks';
 import { useApiHost, useConfig } from '../api';
@@ -112,16 +113,7 @@ export default function Camera({ camera }) {

   return (
       {camera}
-      [markup lost in extraction]
+      [markup lost in extraction]
       {player}
diff --git a/web/src/routes/CameraMap.jsx b/web/src/routes/CameraMap.jsx
index 1e3d32760..d6837ba8f 100644
--- a/web/src/routes/CameraMap.jsx
+++ b/web/src/routes/CameraMap.jsx
@@ -15,13 +15,16 @@ export default function CameraMasks({ camera, url }) {
   const cameraConfig = config.cameras[camera];

   const {
-    width,
-    height,
     motion: { mask: motionMask },
     objects: { filters: objectFilters },
     zones,
   } = cameraConfig;

+  const {
+    width,
+    height,
+  } = cameraConfig.detect;
+
   const [{ width: scaledWidth }] = useResizeObserver(imageRef);
   const imageScale = scaledWidth / width;
diff --git a/web/src/routes/Cameras.jsx b/web/src/routes/Cameras.jsx
index b0bb5a228..94b7cb7f1 100644
--- a/web/src/routes/Cameras.jsx
+++ b/web/src/routes/Cameras.jsx
@@ -5,7 +5,7 @@ import CameraImage from '../components/CameraImage';
 import ClipIcon from '../icons/Clip';
 import MotionIcon from '../icons/Motion';
 import SnapshotIcon from '../icons/Snapshot';
-import { useDetectState, useClipsState, useSnapshotsState } from '../api/mqtt';
+import { useDetectState, useRecordingsState, useSnapshotsState } from '../api/mqtt';
 import { useConfig, FetchStatus } from '../api';
 import { useMemo } from 'preact/hooks';

@@ -25,7 +25,7 @@ export default function Cameras() {

 function Camera({ name, conf }) {
   const { payload: detectValue, send: sendDetect } = useDetectState(name);
-  const { payload: clipValue, send: sendClips } = useClipsState(name);
+  const { payload: recordValue, send: sendRecordings } = useRecordingsState(name);
   const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
   const href = `/cameras/${name}`;
   const buttons = useMemo(() => {
@@ -46,11 +46,11 @@ function Camera({ name, conf }) {
         },
       },
       {
-        name: `Toggle clips ${clipValue === 'ON' ? 'off' : 'on'}`,
+        name: `Toggle recordings ${recordValue === 'ON' ? 'off' : 'on'}`,
         icon: ClipIcon,
-        color: clipValue === 'ON' ? 'blue' : 'gray',
+        color: recordValue === 'ON' ? 'blue' : 'gray',
         onClick: () => {
-          sendClips(clipValue === 'ON' ? 'OFF' : 'ON');
+          sendRecordings(recordValue === 'ON' ? 'OFF' : 'ON');
         },
       },
       {
@@ -62,7 +62,7 @@ function Camera({ name, conf }) {
         },
       },
     ],
-    [detectValue, sendDetect, clipValue, sendClips, snapshotValue, sendSnapshots]
+    [detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
   );

   return (
diff --git a/web/src/routes/Event.jsx b/web/src/routes/Event.jsx
index ae6cb89b1..3cbe4e60f 100644
--- a/web/src/routes/Event.jsx
+++ b/web/src/routes/Event.jsx
@@ -115,8 +115,8 @@ export default function Event({ eventId }) {
         options={{
           sources: [
             {
-              src: `${apiHost}/clips/${data.camera}-${eventId}.mp4`,
-              type: 'video/mp4',
+              src: `${apiHost}/vod/event/${eventId}/index.m3u8`,
+              type: 'application/vnd.apple.mpegurl',
             },
           ],
           poster: data.has_snapshot
             [lines lost in extraction]
         onReady={(player) => {}}
       />
-      [markup lost in extraction]
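Since the Event page now requests `/vod/event/<id>/index.m3u8` instead of a single mp4 file, event playback is assembled from recording segments as an HLS playlist. A quick way to sanity-check that endpoint outside the browser; the host and event id here are placeholders, and the `requests`-based parsing is illustrative rather than part of this diff:

```python
import requests  # assumed HTTP client; any client works

API_HOST = "http://frigate.local:5000"  # placeholder host
EVENT_ID = "1618369683.915185-abc123"   # placeholder event id

# Fetch the same playlist the video.js player above requests.
resp = requests.get(f"{API_HOST}/vod/event/{EVENT_ID}/index.m3u8", timeout=10)
resp.raise_for_status()

# An HLS playlist is plain text: tag/comment lines start with '#',
# every other non-empty line is a media segment URI.
segments = [line for line in resp.text.splitlines() if line and not line.startswith("#")]
print(f"{len(segments)} segment(s) in playlist")
```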
diff --git a/web/src/routes/__tests__/Cameras.test.jsx b/web/src/routes/__tests__/Cameras.test.jsx
index 9828e527c..68d0d0a80 100644
--- a/web/src/routes/__tests__/Cameras.test.jsx
+++ b/web/src/routes/__tests__/Cameras.test.jsx
@@ -51,13 +51,13 @@ describe('Cameras Route', () => {

   test('buttons toggle detect, clips, and snapshots', async () => {
     const sendDetect = jest.fn();
-    const sendClips = jest.fn();
+    const sendRecordings = jest.fn();
     const sendSnapshots = jest.fn();
     jest.spyOn(Mqtt, 'useDetectState').mockImplementation(() => {
       return { payload: 'ON', send: sendDetect };
     });
-    jest.spyOn(Mqtt, 'useClipsState').mockImplementation(() => {
-      return { payload: 'OFF', send: sendClips };
+    jest.spyOn(Mqtt, 'useRecordingsState').mockImplementation(() => {
+      return { payload: 'OFF', send: sendRecordings };
     });
     jest.spyOn(Mqtt, 'useSnapshotsState').mockImplementation(() => {
       return { payload: 'ON', send: sendSnapshots };
@@ -72,11 +72,11 @@ describe('Cameras Route', () => {
     fireEvent.click(screen.getAllByLabelText('Toggle snapshots off')[0]);
     expect(sendSnapshots).toHaveBeenCalledWith('OFF');

-    fireEvent.click(screen.getAllByLabelText('Toggle clips on')[0]);
-    expect(sendClips).toHaveBeenCalledWith('ON');
+    fireEvent.click(screen.getAllByLabelText('Toggle recordings on')[0]);
+    expect(sendRecordings).toHaveBeenCalledWith('ON');

     expect(sendDetect).toHaveBeenCalledTimes(1);
     expect(sendSnapshots).toHaveBeenCalledTimes(1);
-    expect(sendClips).toHaveBeenCalledTimes(1);
+    expect(sendRecordings).toHaveBeenCalledTimes(1);
   });
 });
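One detail the new labelmap tests pin down: user-supplied `model.labelmap` entries override the built-in map per integer index, while every index left alone keeps its default (index 0 stays "person"). A toy sketch of that merge semantic; the dict-based implementation and the default excerpt are illustrative, not Frigate's actual code:

```python
# Small excerpt of a default labelmap; index 0 is "person", as the tests assert.
DEFAULT_LABELMAP = {0: "person", 1: "bicycle", 2: "car"}


def merged_labelmap(user_labelmap=None):
    # Defaults first, then user entries win on any index they redefine.
    merged = dict(DEFAULT_LABELMAP)
    merged.update(user_labelmap or {})
    return merged


assert merged_labelmap({7: "truck"})[7] == "truck"  # mirrors test_merge_labelmap
assert merged_labelmap()[0] == "person"             # mirrors test_default_labelmap_empty
```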