Merge branch 'release-0.9.0' into datepicker

Bernt Christian Egeland 2021-08-24 19:39:47 +02:00
commit b9f3bd8f64
50 changed files with 1436 additions and 736 deletions


@@ -15,10 +15,10 @@ amd64_ffmpeg:
 	docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-amd64 --file docker/Dockerfile.ffmpeg.amd64 .
 nginx_frigate:
-	docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag blakeblackshear/frigate-nginx:1.0.0 --file docker/Dockerfile.nginx .
+	docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag blakeblackshear/frigate-nginx:1.0.2 --file docker/Dockerfile.nginx .
 amd64_frigate: version web
-	docker build --no-cache --tag frigate-base --build-arg ARCH=amd64 --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base .
+	docker build --no-cache --tag frigate-base --build-arg ARCH=amd64 --build-arg FFMPEG_VERSION=1.1.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base .
 	docker build --no-cache --tag frigate --file docker/Dockerfile.amd64 .
 amd64_all: amd64_wheels amd64_ffmpeg amd64_frigate

@@ -30,7 +30,7 @@ amd64nvidia_ffmpeg:
 	docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-amd64nvidia --file docker/Dockerfile.ffmpeg.amd64nvidia .
 amd64nvidia_frigate: version web
-	docker build --no-cache --tag frigate-base --build-arg ARCH=amd64nvidia --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base .
+	docker build --no-cache --tag frigate-base --build-arg ARCH=amd64nvidia --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base .
 	docker build --no-cache --tag frigate --file docker/Dockerfile.amd64nvidia .
 amd64nvidia_all: amd64nvidia_wheels amd64nvidia_ffmpeg amd64nvidia_frigate

@@ -42,7 +42,7 @@ aarch64_ffmpeg:
 	docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-aarch64 --file docker/Dockerfile.ffmpeg.aarch64 .
 aarch64_frigate: version web
-	docker build --no-cache --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base .
+	docker build --no-cache --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base .
 	docker build --no-cache --tag frigate --file docker/Dockerfile.aarch64 .
 armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate

@@ -54,7 +54,7 @@ armv7_ffmpeg:
 	docker build --no-cache --pull --tag blakeblackshear/frigate-ffmpeg:1.2.0-armv7 --file docker/Dockerfile.ffmpeg.armv7 .
 armv7_frigate: version web
-	docker build --no-cache --tag frigate-base --build-arg ARCH=armv7 --build-arg FFMPEG_VERSION=1.2.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.0 --file docker/Dockerfile.base .
+	docker build --no-cache --tag frigate-base --build-arg ARCH=armv7 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base .
 	docker build --no-cache --tag frigate --file docker/Dockerfile.armv7 .
 armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate
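For a sense of how these targets chain together: each `*_all` target depends on the wheel, ffmpeg, and frigate stages in order, so a typical local build on an amd64 host would be:

```sh
# Builds frigate-base and the final frigate image for amd64
# (the version and web prerequisite targets run first)
make amd64_frigate

# Or run the full chain: wheels, ffmpeg, then frigate
make amd64_all
```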


@@ -14,7 +14,7 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but
 - Uses a very low overhead motion detection to determine where to run object detection
 - Object detection with TensorFlow runs in separate processes for maximum FPS
 - Communicates over MQTT for easy integration into other systems
-- Records video clips of detected objects
+- Records video with retention settings based on detected objects
 - 24/7 recording
 - Re-streaming via RTMP to reduce the number of connections to your camera

@@ -23,16 +23,20 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but
 View the documentation at https://blakeblackshear.github.io/frigate

 ## Donations

 If you would like to make a donation to support development, please use [Github Sponsors](https://github.com/sponsors/blakeblackshear).

 ## Screenshots

 Integration into Home Assistant

 <div>
 <a href="docs/static/img/media_browser.png"><img src="docs/static/img/media_browser.png" height=400></a>
 <a href="docs/static/img/notification.png"><img src="docs/static/img/notification.png" height=400></a>
 </div>

 Also comes with a builtin UI:

 <div>
 <a href="docs/static/img/home-ui.png"><img src="docs/static/img/home-ui.png" height=400></a>
 <a href="docs/static/img/camera-ui.png"><img src="docs/static/img/camera-ui.png" height=400></a>


@@ -16,7 +16,13 @@ RUN apt-get -qq update \
     libpng16-16 \
     libtiff5 \
     libdc1394-22 \
+    ## Tensorflow lite
+    && pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_aarch64.whl \
     && rm -rf /var/lib/apt/lists/* \
     && (apt-get autoremove -y; apt-get autoclean -y)
+
+# s6-overlay
+ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-aarch64-installer /tmp/
+RUN chmod +x /tmp/s6-overlay-aarch64-installer && /tmp/s6-overlay-aarch64-installer /
+
+ENTRYPOINT ["/init"]
+CMD ["python3", "-u", "-m", "frigate"]


@@ -16,9 +16,13 @@ RUN apt-get -qq update \
     libgomp1 \
     # VAAPI drivers for Intel hardware accel
     libva-drm2 libva2 libmfx1 i965-va-driver vainfo intel-media-va-driver-non-free mesa-vdpau-drivers mesa-va-drivers mesa-vdpau-drivers libdrm-radeon1 \
+    ## Tensorflow lite
+    && wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
+    && python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
+    && rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
     && rm -rf /var/lib/apt/lists/* \
     && (apt-get autoremove -y; apt-get autoclean -y)
+
+# s6-overlay
+ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-amd64-installer /tmp/
+RUN chmod +x /tmp/s6-overlay-amd64-installer && /tmp/s6-overlay-amd64-installer /
+
+ENTRYPOINT ["/init"]
+CMD ["python3", "-u", "-m", "frigate"]


@@ -6,10 +6,6 @@ RUN apt-get -qq update \
     && apt-get -qq install --no-install-recommends -y \
     # ffmpeg dependencies
     libgomp1 \
-    ## Tensorflow lite
-    && wget -q https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
-    && python3.8 -m pip install tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
-    && rm tflite_runtime-2.5.0-cp38-cp38-linux_x86_64.whl \
     && rm -rf /var/lib/apt/lists/* \
     && (apt-get autoremove -y; apt-get autoclean -y)

@@ -45,3 +41,11 @@ ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
 ENV NVIDIA_VISIBLE_DEVICES all
 ENV NVIDIA_DRIVER_CAPABILITIES compute,utility,video
 ENV NVIDIA_REQUIRE_CUDA "cuda>=11.1 brand=tesla,driver>=418,driver<419 brand=tesla,driver>=440,driver<441 brand=tesla,driver>=450,driver<451"
+
+# s6-overlay
+ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-amd64-installer /tmp/
+RUN chmod +x /tmp/s6-overlay-amd64-installer && /tmp/s6-overlay-amd64-installer /
+
+ENTRYPOINT ["/init"]
+CMD ["python3", "-u", "-m", "frigate"]


@@ -18,7 +18,13 @@ RUN apt-get -qq update \
     libdc1394-22 \
     libaom0 \
     libx265-179 \
+    ## Tensorflow lite
+    && pip3 install https://github.com/google-coral/pycoral/releases/download/release-frogfish/tflite_runtime-2.5.0-cp38-cp38-linux_armv7l.whl \
     && rm -rf /var/lib/apt/lists/* \
     && (apt-get autoremove -y; apt-get autoclean -y)
+
+# s6-overlay
+ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.3/s6-overlay-armhf-installer /tmp/
+RUN chmod +x /tmp/s6-overlay-armhf-installer && /tmp/s6-overlay-armhf-installer /
+
+ENTRYPOINT ["/init"]
+CMD ["python3", "-u", "-m", "frigate"]


@@ -10,10 +10,6 @@ FROM frigate-web as web
 FROM ubuntu:20.04
 LABEL maintainer "blakeb@blakeshome.com"

-# s6-overlay
-ADD https://github.com/just-containers/s6-overlay/releases/download/v2.2.0.1/s6-overlay-amd64-installer /tmp/
-RUN chmod +x /tmp/s6-overlay-amd64-installer && /tmp/s6-overlay-amd64-installer /
-
 COPY --from=ffmpeg /usr/local /usr/local/
 COPY --from=wheels /wheels/. /wheels/

@@ -30,7 +26,7 @@ RUN apt-get -qq update \
     && APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn apt-key adv --fetch-keys https://packages.cloud.google.com/apt/doc/apt-key.gpg \
     && echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
     && echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections \
-    && apt-get -qq update && apt-get -qq install --no-install-recommends -y libedgetpu1-max=15.0 \
+    && apt-get -qq update && apt-get -qq install --no-install-recommends -y libedgetpu1-max python3-tflite-runtime python3-pycoral \
     && rm -rf /var/lib/apt/lists/* /wheels \
     && (apt-get autoremove -y; apt-get autoclean -y)

@@ -57,7 +53,3 @@ COPY docker/rootfs/ /
 EXPOSE 5000
 EXPOSE 1935
-
-ENTRYPOINT ["/init"]
-CMD ["python3", "-u", "-m", "frigate"]


@@ -10,6 +10,7 @@ FROM base as build
 ARG NGINX_VERSION=1.18.0
 ARG VOD_MODULE_VERSION=1.28
+ARG SECURE_TOKEN_MODULE_VERSION=1.4
 ARG RTMP_MODULE_VERSION=1.2.1

 RUN cp /etc/apt/sources.list /etc/apt/sources.list~ \

@@ -23,6 +24,10 @@ RUN apt-get -yqq install --no-install-recommends curl \
     && curl -sL https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz | tar -C /tmp/nginx -zx --strip-components=1 \
     && mkdir /tmp/nginx-vod-module \
     && curl -sL https://github.com/kaltura/nginx-vod-module/archive/refs/tags/${VOD_MODULE_VERSION}.tar.gz | tar -C /tmp/nginx-vod-module -zx --strip-components=1 \
+    # Patch MAX_CLIPS to allow more clips to be added than the default 128
+    && sed -i 's/MAX_CLIPS (128)/MAX_CLIPS (1080)/g' /tmp/nginx-vod-module/vod/media_set.h \
+    && mkdir /tmp/nginx-secure-token-module \
+    && curl -sL https://github.com/kaltura/nginx-secure-token-module/archive/refs/tags/${SECURE_TOKEN_MODULE_VERSION}.tar.gz | tar -C /tmp/nginx-secure-token-module -zx --strip-components=1 \
     && mkdir /tmp/nginx-rtmp-module \
     && curl -sL https://github.com/arut/nginx-rtmp-module/archive/refs/tags/v${RTMP_MODULE_VERSION}.tar.gz | tar -C /tmp/nginx-rtmp-module -zx --strip-components=1

@@ -34,6 +39,7 @@ RUN ./configure --prefix=/usr/local/nginx \
     --with-http_ssl_module \
     --with-threads \
     --add-module=../nginx-vod-module \
+    --add-module=../nginx-secure-token-module \
    --add-module=../nginx-rtmp-module \
     --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"


@@ -71,6 +71,9 @@ http {
     location /vod/ {
       vod hls;

+      secure_token $args;
+      secure_token_types application/vnd.apple.mpegurl;
+
       add_header Access-Control-Allow-Headers '*';
       add_header Access-Control-Expose-Headers 'Server,range,Content-Length,Content-Range';
       add_header Access-Control-Allow-Methods 'GET, HEAD, OPTIONS';

@@ -122,6 +125,11 @@ http {
       root /media/frigate;
     }

+    location /cache/ {
+      internal; # This tells nginx it's not accessible from the outside
+      alias /tmp/cache/;
+    }
+
     location /recordings/ {
       add_header 'Access-Control-Allow-Origin' "$http_origin" always;
       add_header 'Access-Control-Allow-Credentials' 'true';
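A note on the new `/cache/` location: `internal` means nginx refuses direct client requests for `/cache/...` and only serves it via an internal redirect, typically triggered by an `X-Accel-Redirect` header in an application response. A minimal sketch of that handoff, assuming a Flask-style endpoint (not Frigate's actual code; the route name is hypothetical):

```python
from flask import Flask, Response

app = Flask(__name__)

@app.route("/serve-cache/<name>")
def serve_cache(name: str) -> Response:
    resp = Response(status=200)
    # nginx intercepts this header and serves /tmp/cache/<name> itself,
    # because "location /cache/" above is marked internal and aliased to /tmp/cache/
    resp.headers["X-Accel-Redirect"] = f"/cache/{name}"
    return resp
```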


@@ -16,7 +16,7 @@ motion:
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
   threshold: 25
-  # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.4% of the motion frame area)
+  # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.17% of the motion frame area)
   # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller
   # moving objects.
   contour_area: 100

@@ -29,7 +29,7 @@ motion:
   # Low values will cause things like moving shadows to be detected as motion for longer.
   # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
   frame_alpha: 0.2
-  # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 120)
+  # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 180)
   # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense of higher CPU usage.
   # Lower values result in less CPU, but small changes may not register as motion.
   frame_height: 180

@@ -81,15 +81,15 @@ environment_vars:
 ### `database`

-Event and clip information is managed in a sqlite database at `/media/frigate/clips/frigate.db`. If that database is deleted, clips will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
+Event and recording information is managed in a sqlite database at `/media/frigate/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.

-If you are storing your clips on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary.
+If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary.

-This may need to be in a custom location if network storage is used for clips.
+This may need to be in a custom location if network storage is used for the media folder.

 ```yaml
 database:
-  path: /media/frigate/clips/frigate.db
+  path: /media/frigate/frigate.db
 ```

 ### `detectors`

@@ -110,10 +110,17 @@ detectors:
 ### `model`

+If using a custom model, the width and height will need to be specified.
+
+The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. By default, truck is renamed to car because they are often confused. You cannot add new object types, but you can change the names of existing objects in the model.
+
 ```yaml
 model:
   # Required: height of the trained model
   height: 320
   # Required: width of the trained model
   width: 320
+  # Optional: labelmap overrides
+  labelmap:
+    7: car
 ```


@@ -5,15 +5,14 @@ title: Cameras
 ## Setting Up Camera Inputs

-Up to 4 inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create clips from a higher resolution stream, or vice versa.
+Up to 4 inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa.

 Each role can only be assigned to one input per camera. The options for roles are as follows:

 | Role     | Description                                                                            |
 | -------- | -------------------------------------------------------------------------------------- |
 | `detect` | Main feed for object detection                                                         |
-| `clips`  | Clips of events from objects detected in the `detect` feed. [docs](#recording-clips)  |
-| `record` | Saves 60 second segments of the video feed. [docs](#247-recordings)                   |
+| `record` | Saves segments of the video feed based on configuration settings. [docs](#recordings) |
 | `rtmp`   | Broadcast as an RTMP feed for other services to consume. [docs](#rtmp-streams)        |

 ### Example
@@ -31,13 +30,15 @@ cameras:
           - rtmp
       - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/live
         roles:
-          - clips
           - record
+    detect:
       width: 1280
       height: 720
       fps: 5
 ```

+`width`, `height`, and `fps` are only used for the `detect` role. Other streams are passed through, so there is no need to specify the resolution.
+
 ## Masks & Zones

 ### Masks
@@ -93,6 +94,9 @@ zones:
       # Required: List of x,y coordinates to define the polygon of the zone.
       # NOTE: Coordinates can be generated at https://www.image-map.net/
       coordinates: 545,1077,747,939,788,805
+      # Optional: List of objects that can trigger this zone (default: all tracked objects)
+      objects:
+        - person
       # Optional: Zone level object filters.
       # NOTE: The global and camera filters are applied upstream.
       filters:
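Assembled, a full zone definition combining the new `objects` filter with the existing options might look like this sketch. The zone name and filter values are illustrative, and `min_area` is assumed alongside the `max_area`/`threshold` keys shown elsewhere in this diff:

```yaml
zones:
  front_yard:
    coordinates: 545,1077,747,939,788,805
    # Only person objects can trigger this zone
    objects:
      - person
    # Zone level object filters (global and camera filters are applied upstream)
    filters:
      person:
        min_area: 5000
        max_area: 100000
        threshold: 0.7
```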
@@ -127,31 +131,42 @@ objects:
     mask: 0,0,1000,0,1000,200,0,200
 ```

-## Clips
+## Recordings

-Frigate can save video clips without any CPU overhead for encoding by simply copying the stream directly with FFmpeg. It leverages FFmpeg's segment functionality to maintain a cache of video for each camera. The cache files are written to disk at `/tmp/cache` and do not introduce memory overhead. When an object is being tracked, it will extend the cache to ensure it can assemble a clip when the event ends. Once the event ends, it again uses FFmpeg to assemble a clip by combining the video clips without any encoding by the CPU. Assembled clips are saved to `/media/frigate/clips`. Clips are retained according to the retention settings defined on the config for each object type.
+24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config.

-These clips will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264.
+Exported clips are also created off of these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
+
+These recordings will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264.

 :::caution
 Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set.
 :::

 ```yaml
-clips:
-  # Required: enables clips for the camera (default: shown below)
-  # This value can be set via MQTT and will be updated in startup based on retained value
+record:
+  # Optional: Enable recording (default: shown below)
   enabled: False
-  # Optional: Number of seconds before the event to include in the clips (default: shown below)
+  # Optional: Number of days to retain (default: shown below)
+  retain_days: 0
+  # Optional: Event recording settings
+  events:
+    # Optional: Enable event recording retention settings (default: shown below)
+    enabled: False
+    # Optional: Maximum length of time to retain video during long events. (default: shown below)
+    # NOTE: If an object is being tracked for longer than this amount of time, the cache
+    # will begin to expire and the resulting clip will be the last x seconds of the event unless retain_days under record is > 0.
+    max_seconds: 300
+    # Optional: Number of seconds before the event to include in the event (default: shown below)
     pre_capture: 5
-  # Optional: Number of seconds after the event to include in the clips (default: shown below)
+    # Optional: Number of seconds after the event to include in the event (default: shown below)
     post_capture: 5
-  # Optional: Objects to save clips for. (default: all tracked objects)
+    # Optional: Objects to save event for. (default: all tracked objects)
     objects:
       - person
-  # Optional: Restrict clips to objects that entered any of the listed zones (default: no required zones)
+    # Optional: Restrict event to objects that entered any of the listed zones (default: no required zones)
     required_zones: []
-  # Optional: Camera override for retention settings (default: global values)
+    # Optional: Retention settings for event
     retain:
       # Required: Default retention days (default: shown below)
       default: 10

@@ -194,23 +209,6 @@ snapshots:
       person: 15
 ```

-## 24/7 Recordings
-
-24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config.
-
-:::caution
-Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set.
-:::
-
-```yaml
-# Optional: 24/7 recording configuration
-record:
-  # Optional: Enable recording (default: global setting)
-  enabled: False
-  # Optional: Number of days to retain (default: global setting)
-  retain_days: 30
-```
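For readers upgrading from the removed `clips` and standalone `record` sections, a sketch of how an old config maps onto the new consolidated `record` block (values carried over from the examples above; not an exhaustive migration guide):

```yaml
# Before:
# clips:
#   enabled: True
#   pre_capture: 5
#   post_capture: 5

# After:
record:
  enabled: False   # 24/7 recording is still opt-in
  retain_days: 0
  events:
    enabled: True  # event clips now live under record.events
    pre_capture: 5
    post_capture: 5
```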
 ## RTMP streams

 Frigate can re-stream your video feed as a RTMP feed for other applications such as Home Assistant to utilize it at `rtmp://<frigate_host>/live/<camera_name>`. Port 1935 must be open. This allows you to use a video feed for detection in frigate and Home Assistant live view at the same time without having to make two separate connections to the camera. The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
@@ -263,8 +261,8 @@ cameras:
       # Required: the path to the stream
       # NOTE: Environment variables that begin with 'FRIGATE_' may be referenced in {}
       - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-        # Required: list of roles for this stream. valid values are: detect,record,clips,rtmp
-        # NOTICE: In addition to assigning the record, clips, and rtmp roles,
+        # Required: list of roles for this stream. valid values are: detect,record,rtmp
+        # NOTICE: In addition to assigning the record and rtmp roles,
         # they must also be enabled in the camera config.
         roles:
           - detect
@@ -284,14 +282,20 @@ cameras:
     # Optional: camera specific output args (default: inherit)
     output_args:

+    # Required: Camera level detect settings
+    detect:
       # Required: width of the frame for the input with the detect role
       width: 1280
       # Required: height of the frame for the input with the detect role
       height: 720
-      # Optional: desired fps for your camera for the input with the detect role
+      # Required: desired fps for your camera for the input with the detect role
       # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
-      # Frigate will attempt to autodetect if not specified.
       fps: 5
+      # Optional: enables detection for the camera (default: True)
+      # This value can be set via MQTT and will be updated in startup based on retained value
+      enabled: True
+      # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate)
+      max_disappeared: 25

     # Optional: camera level motion config
     motion:
@@ -312,6 +316,9 @@ cameras:
         # Required: List of x,y coordinates to define the polygon of the zone.
         # NOTE: Coordinates can be generated at https://www.image-map.net/
         coordinates: 545,1077,747,939,788,805
+        # Optional: List of objects that can trigger this zone (default: all tracked objects)
+        objects:
+          - person
         # Optional: Zone level object filters.
         # NOTE: The global and camera filters are applied upstream.
         filters:
@@ -320,27 +327,25 @@ cameras:
           max_area: 100000
           threshold: 0.7

-    # Optional: Camera level detect settings
-    detect:
-      # Optional: enables detection for the camera (default: True)
-      # This value can be set via MQTT and will be updated in startup based on retained value
-      enabled: True
-      # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate)
-      max_disappeared: 25
-
-    # Optional: save clips configuration
-    clips:
-      # Required: enables clips for the camera (default: shown below)
+    # Optional: 24/7 recording configuration
+    record:
+      # Optional: Enable recording (default: global setting)
+      enabled: False
+      # Optional: Number of days to retain (default: global setting)
+      retain_days: 30
+      # Optional: Event recording settings
+      events:
+        # Required: enables event recordings for the camera (default: shown below)
         # This value can be set via MQTT and will be updated in startup based on retained value
         enabled: False
-      # Optional: Number of seconds before the event to include in the clips (default: shown below)
+        # Optional: Number of seconds before the event to include (default: shown below)
         pre_capture: 5
-      # Optional: Number of seconds after the event to include in the clips (default: shown below)
+        # Optional: Number of seconds after the event to include (default: shown below)
         post_capture: 5
-      # Optional: Objects to save clips for. (default: all tracked objects)
+        # Optional: Objects to save events for. (default: all tracked objects)
         objects:
           - person
-      # Optional: Restrict clips to objects that entered any of the listed zones (default: no required zones)
+        # Optional: Restrict events to objects that entered any of the listed zones (default: no required zones)
         required_zones: []
         # Optional: Camera override for retention settings (default: global values)
         retain:

@@ -350,13 +355,6 @@ cameras:
         objects:
           person: 15

-    # Optional: 24/7 recording configuration
-    record:
-      # Optional: Enable recording (default: global setting)
-      enabled: False
-      # Optional: Number of days to retain (default: global setting)
-      retain_days: 30
-
     # Optional: RTMP re-stream configuration
     rtmp:
       # Required: Enable the RTMP stream (default: True)
@@ -364,7 +362,7 @@ cameras:
     # Optional: Live stream configuration for WebUI
     live:
-      # Optional: Set the height of the live stream. (default: detect stream height)
+      # Optional: Set the height of the live stream. (default: 720)
       # This must be less than or equal to the height of the detect stream. Lower resolutions
       # reduce bandwidth required for viewing the live stream. Width is computed to match known aspect ratio.
       height: 720
@@ -483,12 +481,11 @@ input_args:
   - "1"
 ```

-Note that mjpeg cameras require encoding the video into h264 for clips, recording, and rtmp roles. This will use significantly more CPU than if the cameras supported h264 feeds directly.
+Note that mjpeg cameras require encoding the video into h264 for the recording and rtmp roles. This will use significantly more CPU than if the cameras supported h264 feeds directly.

 ```yaml
 output_args:
   record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v libx264 -an
-  clips: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v libx264 -an
   rtmp: -c:v libx264 -an -f flv
 ```


@@ -30,6 +30,15 @@ detectors:
     device: usb:1
 ```

+Native Coral (Dev Board):
+
+```yaml
+detectors:
+  coral:
+    type: edgetpu
+    device: ''
+```
+
 Multiple PCIE/M.2 Corals:

 ```yaml


@@ -20,6 +20,7 @@ cameras:
         roles:
           - detect
           - rtmp
+    detect:
       width: 1280
       height: 720
       fps: 5
@@ -76,6 +77,7 @@ cameras:
         roles:
           - detect
           - rtmp
+    detect:
       width: 1280
       height: 720
       fps: 5
@@ -83,15 +85,95 @@ cameras:
 ## Optional

-### `clips`
+### `database`

 ```yaml
-clips:
+database:
+  # The path to store the SQLite DB (default: shown below)
+  path: /media/frigate/frigate.db
+```
+
+### `model`
+
+```yaml
+# Optional: model modifications
+model:
+  # Required: Object detection model input width (default: shown below)
+  width: 320
+  # Required: Object detection model input height (default: shown below)
+  height: 320
+  # Optional: Label name modifications
+  labelmap:
+    2: vehicle # previously "car"
+```
+
+### `detectors`
+
+Check the [detectors configuration page](detectors.md) for a complete list of options.
+
+### `logger`
+
+```yaml
+# Optional: logger verbosity settings
+logger:
+  # Optional: Default log verbosity (default: shown below)
+  default: info
+  # Optional: Component specific logger overrides
+  logs:
+    frigate.event: debug
+```
+
+### `record`
+
+Can be overridden at the camera level. 24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config.
+
+Exported clips are also created off of these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
+
+These recordings will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264.
+
+:::caution
+Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set.
+:::
+
+```yaml
+record:
+  # Optional: Enable recording (default: shown below)
+  enabled: False
+  # Optional: Number of days to retain (default: shown below)
+  retain_days: 0
+  # Optional: Event recording settings
+  events:
+    # Optional: Enable event recording retention settings (default: shown below)
+    enabled: False
     # Optional: Maximum length of time to retain video during long events. (default: shown below)
     # NOTE: If an object is being tracked for longer than this amount of time, the cache
-    # will begin to expire and the resulting clip will be the last x seconds of the event.
+    # will begin to expire and the resulting clip will be the last x seconds of the event unless retain_days under record is > 0.
     max_seconds: 300
-  # Optional: Retention settings for clips (default: shown below)
+    # Optional: Number of seconds before the event to include (default: shown below)
+    pre_capture: 5
+    # Optional: Number of seconds after the event to include (default: shown below)
+    post_capture: 5
+    # Optional: Objects to save recordings for. (default: all tracked objects)
+    objects:
+      - person
+    # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones)
+    required_zones: []
+    # Optional: Retention settings for events
+    retain:
+      # Required: Default retention days (default: shown below)
+      default: 10
+      # Optional: Per object retention days
+      objects:
+        person: 15
+```
+
+## `snapshots`
+
+Can be overridden at the camera level. Global snapshot retention settings.
+
+```yaml
+# Optional: Configuration for the jpg snapshots written to the clips directory for each event
+snapshots:
   retain:
     # Required: Default retention days (default: shown below)
     default: 10
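Putting the new global sections together, a minimal working config using the options documented above might look like this sketch. The camera name, MQTT host, and RTSP URL are placeholders, and the `mqtt` block is assumed to be required as in the rest of the documentation:

```yaml
mqtt:
  host: mqtt.example.com

database:
  path: /media/frigate/frigate.db

record:
  enabled: True
  retain_days: 7

cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://viewer:password@10.0.10.10:554/live
          roles:
            - detect
            - record
    detect:
      width: 1280
      height: 720
      fps: 5
```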
@@ -102,6 +184,8 @@ clips:
 ### `ffmpeg`

+Can be overridden at the camera level.
+
 ```yaml
 ffmpeg:
   # Optional: global ffmpeg args (default: shown below)
@@ -117,8 +201,6 @@ ffmpeg:
     detect: -f rawvideo -pix_fmt yuv420p
     # Optional: output args for record streams (default: shown below)
     record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an
-    # Optional: output args for clips streams (default: shown below)
-    clips: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an
     # Optional: output args for rtmp streams (default: shown below)
     rtmp: -c copy -f flv
 ```
@@ -145,22 +227,6 @@ objects:
     threshold: 0.7
 ```

-### `record`
-
-Can be overridden at the camera level. 24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config.
-
-:::caution
-Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set.
-:::
-
-```yaml
-record:
-  # Optional: Enable recording
-  enabled: False
-  # Optional: Number of days to retain
-  retain_days: 30
-```
-
 ### `birdseye`

 A dynamic combined camera view of all tracked cameras. This is optimized for minimal bandwidth and server resource utilization. Encoding is only performed when actively viewing the video feed, and only active (defined by the mode) cameras are included in the view.
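This hunk doesn't show a config block for birdseye; a hedged sketch of what enabling it could look like follows. The `mode` key is implied by the description ("active (defined by the mode) cameras"), but the key names and values here are assumptions:

```yaml
birdseye:
  enabled: True
  # Which cameras count as "active", e.g. objects, motion, or continuous (assumed values)
  mode: objects
```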


@@ -4,13 +4,13 @@ title: Default available objects
 sidebar_label: Available objects
 ---

-import labels from '../../../labelmap.txt';
+import labels from "../../../labelmap.txt";

 By default, Frigate includes the following object models from the Google Coral test data.

 <ul>
-  {labels.split('\n').map((label) => (
-    <li>{label.replace(/^\d+\s+/, '')}</li>
+  {labels.split("\n").map((label) => (
+    <li>{label.replace(/^\d+\s+/, "")}</li>
   ))}
 </ul>

@@ -23,14 +23,3 @@ Models for both CPU and EdgeTPU (Coral) are bundled in the image. You can use yo
 - Labels: `/labelmap.txt`

 You also need to update the model width/height in the config if they differ from the defaults.

-### Customizing the Labelmap
-
-The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. You must retain the same number of labels, but you can change the names. To change:
-
-- Download the [COCO labelmap](https://dl.google.com/coral/canned_models/coco_labels.txt)
-- Modify the label names as desired. For example, change `7 truck` to `7 car`
-- Mount the new file at `/labelmap.txt` in the container with an additional volume
-
-```
--v ./config/labelmap.txt:/labelmap.txt
-```


@@ -63,7 +63,7 @@ cameras:
         roles:
           - detect
           - rtmp
-          - clips
+    detect:
       height: 1080
       width: 1920
       fps: 5

@@ -73,7 +73,7 @@ These input args tell ffmpeg to read the mp4 file in an infinite loop.
 #### 3. Gather some mp4 files for testing

-Create and place these files in a `debug` folder in the root of the repo. This is also where clips and recordings will be created if you enable them in your test config. Update your config from step 2 above to point at the right file. You can check the `docker-compose.yml` file in the repo to see how the volumes are mapped.
+Create and place these files in a `debug` folder in the root of the repo. This is also where recordings will be created if you enable them in your test config. Update your config from step 2 above to point at the right file. You can check the `docker-compose.yml` file in the repo to see how the volumes are mapped.

 #### 4. Open the repo with Visual Studio Code


@@ -5,7 +5,7 @@ title: Recommended hardware
 ## Cameras

-Cameras that output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. It is also helpful if your camera supports multiple substreams to allow different resolutions to be used for detection, streaming, clips, and recordings without re-encoding.
+Cameras that output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. It is also helpful if your camera supports multiple substreams to allow different resolutions to be used for detection, streaming, and recordings without re-encoding.

 ## Computer


@@ -5,7 +5,7 @@ title: Installation
 Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). See instructions below for installing the HassOS addon.

-For Home Assistant users, there is also a [custom component (aka integration)](https://github.com/blakeblackshear/frigate-hass-integration). This custom component adds tighter integration with Home Assistant by automatically setting up camera entities, sensors, media browser for clips and recordings, and a public API to simplify notifications.
+For Home Assistant users, there is also a [custom component (aka integration)](https://github.com/blakeblackshear/frigate-hass-integration). This custom component adds tighter integration with Home Assistant by automatically setting up camera entities, sensors, media browser for recordings, and a public API to simplify notifications.

 Note that HassOS Addons and custom components are different things. If you are already running Frigate with Docker directly, you do not need the Addon since the Addon would run another instance of Frigate.


@@ -3,25 +3,27 @@ id: troubleshooting
 title: Troubleshooting and FAQ
 ---

-### How can I get sound or audio in my clips and recordings?
+### I am seeing a solid green image for my camera.
+
+A solid green image means that frigate has not received any frames from ffmpeg. Check the logs to see why ffmpeg is exiting and adjust your ffmpeg args accordingly.
+
+### How can I get sound or audio in my recordings?

-By default, Frigate removes audio from clips and recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](/frigate/configuration/index#ffmpeg).
+By default, Frigate removes audio from recordings to reduce the likelihood of failing for invalid data. If you would like to include audio, you need to override the output args to remove `-an` for where you want to include audio. The recommended audio codec is `aac`. Not all audio codecs are supported by RTMP, so you may need to re-encode your audio with `-c:a aac`. The default ffmpeg args are shown [here](/frigate/configuration/index#ffmpeg).
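Following that answer, one way to keep audio is to copy the default `record` output args and replace `-an` (drop audio) with an AAC audio track. A sketch, not an official preset:

```yaml
ffmpeg:
  output_args:
    # Default record args with "-an" swapped for AAC re-encoding
    record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -c:a aac
    # RTMP likewise re-encoded to AAC, since not all codecs are supported by RTMP
    rtmp: -c:v copy -c:a aac -f flv
```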
 ### My mjpeg stream or snapshots look green and crazy

 This almost always means that the width/height defined for your camera are not correct. Double check the resolution with vlc or another player. Also make sure you don't have the width and height values backwards.

 ![mismatched-resolution](/img/mismatched-resolution.jpg)

-### I have clips and snapshots in my clips folder, but I can't view them in the Web UI.
+### I can't view events or recordings in the Web UI.

-This is usually caused by one of two things:
-
-- The permissions on the parent folder don't have execute and nginx returns a 403 error you can see in the browser logs
-  - In this case, try mounting a volume to `/media/frigate` inside the container instead of `/media/frigate/clips`.
-- Your cameras do not send h264 encoded video and the mp4 files are not playable in the browser
+Ensure your cameras send h264 encoded video.

 ### "[mov,mp4,m4a,3gp,3g2,mj2 @ 0x5639eeb6e140] moov atom not found"

-These messages in the logs are expected in certain situations. Frigate checks the integrity of the video cache before assembling clips. Occasionally these cached files will be invalid and cleaned up automatically.
+These messages in the logs are expected in certain situations. Frigate checks the integrity of the recordings before storing. Occasionally these cached files will be invalid and cleaned up automatically.

 ### "On connect called"


@@ -206,10 +206,6 @@ Accepts the following query string parameters, but they are only applied when an
 | `crop`    | int | Crop the snapshot to the (0 or 1)              |
 | `quality` | int | Jpeg encoding quality (0-100). Defaults to 70. |

-### `/clips/<camera>-<id>.mp4`
-
-Video clip for the given camera and event id.
-
 ### `/clips/<camera>-<id>.jpg`

 JPG snapshot for the given camera and event id.


@ -4,31 +4,93 @@ title: Integration with Home Assistant
sidebar_label: Home Assistant sidebar_label: Home Assistant
--- ---
The best way to integrate with Home Assistant is to use the [official integration](https://github.com/blakeblackshear/frigate-hass-integration). When configuring the integration, you will be asked for the `Host` of your frigate instance. This value should be the url you use to access Frigate in the browser and will look like `http://<host>:5000/`. If you are using HassOS with the addon, the host should be `http://ccab4aaf-frigate:5000` (or `http://ccab4aaf-frigate-beta:5000` if your are using the beta version of the addon). Home Assistant needs access to port 5000 (api) and 1935 (rtmp) for all features. The integration will setup the following entities within Home Assistant: The best way to integrate with Home Assistant is to use the [official integration](https://github.com/blakeblackshear/frigate-hass-integration).
## Sensors: ## Installation
- Stats to monitor frigate performance Available via HACS as a [custom repository](https://hacs.xyz/docs/faq/custom_repositories). To install:
- Object counts for all zones and cameras
## Cameras: - Add the custom repository:
- Cameras for image of the last detected object for each camera ```
- Camera entities with stream support (requires RTMP) Home Assistant > HACS > Integrations > [...] > Custom Repositories
```
## Media Browser: | Key | Value |
| -------------- | ----------------------------------------------------------- |
| Repository URL | https://github.com/blakeblackshear/frigate-hass-integration |
| Category | Integration |
- Rich UI with thumbnails for browsing event clips - Use [HACS](https://hacs.xyz/) to install the integration:
```
Home Assistant > HACS > Integrations > "Explore & Add Integrations" > Frigate
```
- Restart Home Assistant.
- Then add/configure the integration:
```
Home Assistant > Configuration > Integrations > Add Integration > Frigate
```
Note: You will also need
[media_source](https://www.home-assistant.io/integrations/media_source/) enabled
in your Home Assistant configuration for the Media Browser to appear.
## Configuration
When configuring the integration, you will be asked for the following parameters:
| Variable | Description |
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| URL | The `URL` of your frigate instance, the URL you use to access Frigate in the browser. This may look like `http://<host>:5000/`. If you are using HassOS with the addon, the URL should be `http://ccab4aaf-frigate:5000` (or `http://ccab4aaf-frigate-beta:5000` if your are using the beta version of the addon). Live streams required port 1935, see [RTMP streams](#streams) |
<a name="options"></a>
## Options
```
Home Assistant > Configuration > Integrations > Frigate > Options
```
| Option | Description |
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| RTMP URL Template | A [jinja2](https://jinja.palletsprojects.com/) template that is used to override the standard RTMP stream URL (e.g. for use with reverse proxies). This option is only shown to users who have [advanced mode](https://www.home-assistant.io/blog/2019/07/17/release-96/#advanced-mode) enabled. See [RTMP streams](#streams) below. |
## Entities Provided
| Platform | Description |
| --------------- | --------------------------------------------------------------------------------- |
| `camera` | Live camera stream (requires RTMP), camera for image of the last detected object. |
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
## Media Browser Support
The integration provides:
- Rich UI with thumbnails for browsing event recordings
- Rich UI for browsing 24/7 recordings by month, day, camera, time - Rich UI for browsing 24/7 recordings by month, day, camera, time
## API: This is accessible via "Media Browser" on the left menu panel in Home Assistant.
<a name="api"></a>
## API
- Notification API with public facing endpoints for images in notifications - Notification API with public facing endpoints for images in notifications
### Notifications ### Notifications
Frigate publishes event information in the form of a change feed via MQTT. This
allows lots of customization for notifications to meet your needs. Event changes
are published with `before` and `after` information as shown
[here](#frigateevents). Note that some people may not want to expose Frigate to
the web, so you can leverage the HA API that the frigate custom_integration ties
into (which is exposed to the web, and thus can be used for mobile notifications
etc):
To load an image taken by Frigate from Home Assistant's API, see below:
@ -57,6 +119,7 @@ automation:
data:
image: 'https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg?format=android'
tag: '{{trigger.payload_json["after"]["id"]}}'
when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```
```yaml
@ -75,6 +138,7 @@ automation:
data:
image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
tag: "{{trigger.payload_json['after']['id']}}"
when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```
```yaml
@ -93,6 +157,7 @@ automation:
data:
image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
tag: "{{trigger.payload_json['after']['id']}}"
when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```
```yaml
@ -111,6 +176,7 @@ automation:
data:
image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}/thumbnail.jpg"
tag: "{{trigger.payload_json['after']['id']}}"
when: '{{trigger.payload_json["after"]["start_time"]|int}}'
```
If you are using Telegram, you can fetch the image directly from Frigate:
@ -131,3 +197,85 @@ automation:
- url: 'http://ccab4aaf-frigate:5000/api/events/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg'
caption: 'A {{trigger.payload_json["after"]["label"]}} was detected on {{ trigger.payload_json["after"]["camera"] }} camera'
```
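The snippets above are shown as diff fragments; for reference, a complete minimal automation might look like the following sketch (the notify service name and public URL are hypothetical and must match your own setup):
```yaml
automation:
  - alias: Notify when a person is detected
    trigger:
      - platform: mqtt
        topic: frigate/events
    condition:
      - condition: template
        value_template: "{{ trigger.payload_json['after']['label'] == 'person' }}"
    action:
      - service: notify.mobile_app_your_phone # hypothetical notify target
        data:
          message: 'A {{ trigger.payload_json["after"]["label"] }} was detected.'
          data:
            image: 'https://your.public.hass.address.com/api/frigate/notifications/{{ trigger.payload_json["after"]["id"] }}/thumbnail.jpg'
            tag: '{{ trigger.payload_json["after"]["id"] }}'
```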
<a name="streams"></a>
## RTMP stream
In order for the live streams to function, they need to be accessible on the
RTMP port (default: `1935`) at `<frigatehost>:1935`. Home Assistant connects
directly to that streaming port when the live camera is viewed.
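For reference, the default stream address the integration derives has the following shape (a sketch inferred from the template examples below; `<camera_name>` comes from your Frigate configuration):
```
rtmp://<frigatehost>:1935/live/<camera_name>
```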
#### RTMP URL Template
For advanced use cases, this behavior can be changed with the [RTMP URL
template](#options) option. When set, this string will override the default stream
address that is derived from the default behavior described above. This option supports
[jinja2 templates](https://jinja.palletsprojects.com/) and has the `camera` dict
variables from [Frigate API](https://blakeblackshear.github.io/frigate/usage/api#apiconfig)
available for the template. Note that no Home Assistant state is available to the
template, only the camera dict from Frigate.
This is potentially useful when Frigate is behind a reverse proxy, and/or when
the default stream port is otherwise not accessible to Home Assistant (e.g.
firewall rules).
###### RTMP URL Template Examples
Use a different port number:
```
rtmp://<frigate_host>:2000/live/front_door
```
Use the camera name in the stream URL:
```
rtmp://<frigate_host>:2000/live/{{ name }}
```
Use the camera name in the stream URL, converting it to lowercase first:
```
rtmp://<frigate_host>:2000/live/{{ name|lower }}
```
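Filters can also be chained; for instance (a sketch using only built-in jinja2 filters, with a hypothetical camera name containing underscores):
```
rtmp://<frigate_host>:2000/live/{{ name | lower | replace('_', '-') }}
```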
## Multiple Instance Support
The Frigate integration seamlessly supports the use of multiple Frigate servers.
### Requirements for Multiple Instances
In order for multiple Frigate instances to function correctly, the
`topic_prefix` and `client_id` parameters must be set differently per server.
See [MQTT
configuration](https://blakeblackshear.github.io/frigate/configuration/index#mqtt)
for how to set these.
#### API URLs
When multiple Frigate instances are configured, [API](#api) URLs should include an
identifier to tell Home Assistant which Frigate instance to refer to. The
identifier used is the MQTT `client_id` parameter included in the configuration,
and is used like so:
```
https://HA_URL/api/frigate/<client-id>/notifications/<event-id>/thumbnail.jpg
```
```
https://HA_URL/api/frigate/<client-id>/clips/front_door-1624599978.427826-976jaa.mp4
```
#### Default Treatment
When a single Frigate instance is configured, the `client-id` parameter need not
be specified in URLs/identifiers -- that single instance is assumed. When
multiple Frigate instances are configured, the user **must** explicitly specify
which server they are referring to.
## FAQ
### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit?
The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera.
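For example, a HomeKit configuration along the following lines links one specific sensor to the camera (a sketch; the entity ids are hypothetical):
```yaml
homekit:
  filter:
    include_entities:
      - camera.front_door
  entity_config:
    camera.front_door:
      linked_motion_sensor: binary_sensor.front_door_person_motion
```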
View File
@ -11,6 +11,10 @@ Designed to be used as an availability topic with Home Assistant. Possible messa
"online": published when frigate is running (on startup) "online": published when frigate is running (on startup)
"offline": published right before frigate stops "offline": published right before frigate stops
### `frigate/restart`
Causes frigate to exit. Docker should be configured to automatically restart the container on exit.
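For example, a restart can be triggered from Home Assistant with the `mqtt.publish` service (a sketch assuming the default `frigate` topic prefix):
```yaml
script:
  restart_frigate:
    sequence:
      - service: mqtt.publish
        data:
          topic: frigate/restart
```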
### `frigate/<camera_name>/<object_name>`
Publishes the count of objects for the camera for use as a sensor in Home Assistant.
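A matching MQTT sensor definition (2021-era YAML syntax) might look like this sketch; the camera and object names are hypothetical:
```yaml
sensor:
  - platform: mqtt
    name: "Front door person count"
    state_topic: "frigate/front_door/person"
```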
@ -84,13 +88,13 @@ Topic to turn detection for a camera on and off. Expected values are `ON` and `O
Topic with current state of detection for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/recordings/set`
Topic to turn recordings for a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/recordings/state`
Topic with current state of recordings for a camera. Published values are `ON` and `OFF`.
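For instance, recordings for a single camera could be toggled from an automation action (a sketch; the camera name is hypothetical):
```yaml
- service: mqtt.publish
  data:
    topic: frigate/front_door/recordings/set
    payload: "ON"
```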
### `frigate/<camera_name>/snapshots/set`
View File
@ -23,7 +23,7 @@ from frigate.models import Event, Recordings
from frigate.mqtt import create_mqtt_client, MqttSocketRelay
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.record import RecordingCleanup, RecordingMaintainer
from frigate.stats import StatsEmitter, stats_init
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
@ -90,15 +90,6 @@ class FrigateApp:
assigned_roles = list(
set([r for i in camera.ffmpeg.inputs for r in i.roles])
)
if not camera.clips.enabled and "clips" in assigned_roles:
logger.warning(
f"Camera {name} has clips assigned to an input, but clips is not enabled."
)
elif camera.clips.enabled and not "clips" in assigned_roles:
logger.warning(
f"Camera {name} has clips enabled, but clips is not assigned to an input."
)
if not camera.record.enabled and "record" in assigned_roles:
logger.warning(
f"Camera {name} has record assigned to an input, but record is not enabled."
@ -259,6 +250,7 @@ class FrigateApp:
name,
config,
model_shape,
self.config.model.merged_labelmap,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
@ -300,6 +292,10 @@ class FrigateApp:
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
self.recording_maintainer.start()
def start_recording_cleanup(self):
self.recording_cleanup = RecordingCleanup(self.config, self.stop_event)
self.recording_cleanup.start()
def start_stats_emitter(self):
self.stats_emitter = StatsEmitter(
self.config,
@ -345,6 +341,7 @@ class FrigateApp:
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
self.start_recording_cleanup()
self.start_stats_emitter()
self.start_watchdog()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
@ -371,6 +368,7 @@ class FrigateApp:
self.event_processor.join()
self.event_cleanup.join()
self.recording_maintainer.join()
self.recording_cleanup.join()
self.stats_emitter.join()
self.frigate_watchdog.join()
self.db.stop()
View File
@ -1,18 +1,19 @@
from __future__ import annotations
import json
import logging
import os
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import yaml
from pydantic import BaseModel, Field, validator
from pydantic.fields import PrivateAttr
from frigate.const import BASE_DIR, CACHE_DIR, RECORD_DIR
from frigate.edgetpu import load_labels
from frigate.util import create_mask, deep_merge
logger = logging.getLogger(__name__)
@ -25,7 +26,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
DEFAULT_TRACKED_OBJECTS = ["person"]
DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
class DetectorTypeEnum(str, Enum):
@ -34,9 +35,7 @@ class DetectorTypeEnum(str, Enum):
class DetectorConfig(BaseModel):
type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
device: str = Field(default="usb", title="Device Type")
num_threads: int = Field(default=3, title="Number of detection threads")
@ -68,13 +67,32 @@ class RetainConfig(BaseModel):
)
# DEPRECATED: Will eventually be removed
class ClipsConfig(BaseModel):
enabled: bool = Field(default=False, title="Save clips.")
max_seconds: int = Field(default=300, title="Maximum clip duration.")
pre_capture: int = Field(default=5, title="Seconds to capture before event starts.")
post_capture: int = Field(default=5, title="Seconds to capture after event ends.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save the clip.",
)
objects: Optional[List[str]] = Field(
title="List of objects to be detected in order to save the clip.",
)
retain: RetainConfig = Field(
default_factory=RetainConfig, title="Clip retention settings."
)
class RecordConfig(BaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
retain_days: int = Field(default=0, title="Recording retention period in days.")
events: ClipsConfig = Field(
default_factory=ClipsConfig, title="Event specific settings."
)
class MotionConfig(BaseModel):
threshold: int = Field(
default=25,
@ -99,11 +117,13 @@ class RuntimeMotionConfig(MotionConfig):
frame_shape = config.get("frame_shape", (1, 1))
if "frame_height" not in config:
config["frame_height"] = max(frame_shape[0] // 6, 180)
if "contour_area" not in config:
frame_width = frame_shape[1] * config["frame_height"] / frame_shape[0]
config["contour_area"] = (
config["frame_height"] * frame_width * 0.00173611111
)
mask = config.get("mask", "")
config["raw_mask"] = mask
@ -129,6 +149,9 @@ class RuntimeMotionConfig(MotionConfig):
class DetectConfig(BaseModel):
height: int = Field(title="Height of the stream for the detect role.")
width: int = Field(title="Width of the stream for the detect role.")
fps: int = Field(title="Number of frames per second to process through detection.")
enabled: bool = Field(default=True, title="Detection Enabled.")
max_disappeared: Optional[int] = Field(
title="Maximum number of frames the object can dissapear before detection ends."
@ -185,6 +208,10 @@ class ZoneConfig(BaseModel):
coordinates: Union[str, List[str]] = Field(
title="Coordinates polygon for the defined zone."
)
objects: List[str] = Field(
default_factory=list,
title="List of objects that can trigger the zone.",
)
_color: Optional[Tuple[int, int, int]] = PrivateAttr()
_contour: np.ndarray = PrivateAttr()
@ -257,26 +284,11 @@ FFMPEG_INPUT_ARGS_DEFAULT = [
]
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"]
RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"]
SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-f",
"segment",
"-segment_time",
"10",
"-segment_format",
"mp4",
"-reset_timestamps",
"1",
"-strftime",
"1",
"-c",
"copy",
"-an",
]
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-f",
"segment",
"-segment_time",
"10",
"-segment_format",
"mp4",
"-reset_timestamps",
@ -298,10 +310,6 @@ class FfmpegOutputArgsConfig(BaseModel):
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.",
)
clips: Union[str, List[str]] = Field(
default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Clips role FFmpeg output arguments.",
)
rtmp: Union[str, List[str]] = Field(
default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="RTMP role FFmpeg output arguments.",
@ -340,18 +348,6 @@ class CameraInput(BaseModel):
class CameraFfmpegConfig(FfmpegConfig):
inputs: List[CameraInput] = Field(title="Camera inputs.")
global_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg global arguments."
)
hwaccel_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg hardware acceleration arguments."
)
input_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg input arguments."
)
output_args: FfmpegOutputArgsConfig = Field(
default_factory=FfmpegOutputArgsConfig, title="FFmpeg output arguments."
)
@validator("inputs")
def validate_roles(cls, v):
@ -428,43 +424,18 @@ class CameraMqttConfig(BaseModel):
)
class CameraClipsConfig(BaseModel):
enabled: bool = Field(default=False, title="Save clips.")
pre_capture: int = Field(default=5, title="Seconds to capture before event starts.")
post_capture: int = Field(default=5, title="Seconds to capture after event ends.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save the clip.",
)
objects: Optional[List[str]] = Field(
title="List of objects to be detected in order to save the clip.",
)
retain: RetainConfig = Field(default_factory=RetainConfig, title="Clip retention.")
class CameraRtmpConfig(BaseModel):
enabled: bool = Field(default=True, title="RTMP restreaming enabled.")
class CameraLiveConfig(BaseModel):
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
class RecordConfig(BaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
retain_days: int = Field(default=30, title="Recording retention period in days.")
class CameraConfig(BaseModel):
name: Optional[str] = Field(title="Camera name.")
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
height: int = Field(title="Height of the stream for the detect role.")
width: int = Field(title="Width of the stream for the detect role.")
fps: Optional[int] = Field(
title="Number of frames per second to process through Frigate."
)
best_image_timeout: int = Field(
default=60,
title="How long to wait for the image with the highest confidence score.",
@ -472,9 +443,6 @@ class CameraConfig(BaseModel):
zones: Dict[str, ZoneConfig] = Field(
default_factory=dict, title="Zone configuration."
)
clips: CameraClipsConfig = Field(
default_factory=CameraClipsConfig, title="Clip configuration."
)
record: RecordConfig = Field(
default_factory=RecordConfig, title="Record configuration."
)
@ -492,7 +460,7 @@ class CameraConfig(BaseModel):
default_factory=ObjectConfig, title="Object configuration."
)
motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
detect: DetectConfig = Field(title="Object detection configuration.")
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig, title="Timestamp style configuration."
)
@ -510,11 +478,11 @@ class CameraConfig(BaseModel):
@property
def frame_shape(self) -> Tuple[int, int]:
return self.detect.height, self.detect.width
@property
def frame_shape_yuv(self) -> Tuple[int, int]:
return self.detect.height * 3 // 2, self.detect.width
@property
def ffmpeg_cmds(self) -> List[Dict[str, List[str]]]:
@ -535,9 +503,17 @@ class CameraConfig(BaseModel):
if isinstance(self.ffmpeg.output_args.detect, list)
else self.ffmpeg.output_args.detect.split(" ")
)
ffmpeg_output_args = (
[
"-r",
str(self.detect.fps),
"-s",
f"{self.detect.width}x{self.detect.height}",
]
+ detect_args
+ ffmpeg_output_args
+ ["pipe:"]
)
if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled:
rtmp_args = (
self.ffmpeg.output_args.rtmp
@ -547,17 +523,6 @@ class CameraConfig(BaseModel):
ffmpeg_output_args = (
rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
)
if "clips" in ffmpeg_input.roles:
clips_args = (
self.ffmpeg.output_args.clips
if isinstance(self.ffmpeg.output_args.clips, list)
else self.ffmpeg.output_args.clips.split(" ")
)
ffmpeg_output_args = (
clips_args
+ [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"]
+ ffmpeg_output_args
)
if "record" in ffmpeg_input.roles and self.record.enabled:
record_args = (
self.ffmpeg.output_args.record
@ -566,7 +531,7 @@ class CameraConfig(BaseModel):
)
ffmpeg_output_args = (
record_args
+ [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"]
+ ffmpeg_output_args
)
@ -609,6 +574,33 @@ class DatabaseConfig(BaseModel):
class ModelConfig(BaseModel):
width: int = Field(default=320, title="Object detection model input width.")
height: int = Field(default=320, title="Object detection model input height.")
labelmap: Dict[int, str] = Field(
default_factory=dict, title="Labelmap customization."
)
_merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
_colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
@property
def merged_labelmap(self) -> Dict[int, str]:
return self._merged_labelmap
@property
def colormap(self) -> Dict[int, tuple[int, int, int]]:
return self._colormap
def __init__(self, **config):
super().__init__(**config)
self._merged_labelmap = {
**load_labels("/labelmap.txt"),
**config.get("labelmap", {}),
}
cmap = plt.cm.get_cmap("tab10", len(self._merged_labelmap.keys()))
self._colormap = {}
for key, val in self._merged_labelmap.items():
self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class LogLevelEnum(str, Enum):
@ -652,9 +644,6 @@ class FrigateConfig(BaseModel):
logger: LoggerConfig = Field(
default_factory=LoggerConfig, title="Logging configuration."
)
clips: ClipsConfig = Field(
default_factory=ClipsConfig, title="Global clips configuration."
)
record: RecordConfig = Field(
default_factory=RecordConfig, title="Global record configuration."
)
@ -690,7 +679,6 @@ class FrigateConfig(BaseModel):
# Global config to propagate down to camera level
global_config = config.dict(
include={
"clips": {"retain"},
"record": ...,
"snapshots": ...,
"objects": ...,
@ -703,7 +691,9 @@ class FrigateConfig(BaseModel):
for name, camera in config.cameras.items():
merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
camera_config: CameraConfig = CameraConfig.parse_obj(
{"name": name, **merged_config}
)
# FFMPEG input substitution
for input in camera_config.ffmpeg.inputs:
@ -753,30 +743,13 @@ class FrigateConfig(BaseModel):
)
# Default detect configuration
max_disappeared = camera_config.detect.fps * 5
if camera_config.detect.max_disappeared is None:
camera_config.detect.max_disappeared = max_disappeared
# Default live configuration
if camera_config.live is None:
camera_config.live = CameraLiveConfig()
config.cameras[name] = camera_config
View File
@ -68,9 +68,14 @@ class LocalObjectDetector(ObjectDetector):
experimental_delegates=[edge_tpu_delegate],
)
except ValueError:
logger.error(
"No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
)
raise
else:
logger.warning(
"CPU detectors are not recommended and should only be used for testing or for trial purposes."
)
self.interpreter = tflite.Interpreter(
model_path="/cpu_model.tflite", num_threads=num_threads
)
@ -97,21 +102,22 @@ class LocalObjectDetector(ObjectDetector):
def detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
count = int(
self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
)
detections = np.zeros((20, 6), np.float32)
for i in range(count):
if scores[i] < 0.4 or i == 20:
break
detections[i] = [
class_ids[i],
float(scores[i]),
boxes[i][0],
boxes[i][1],
boxes[i][2],
@ -231,7 +237,7 @@ class EdgeTPUProcess:
class RemoteObjectDetector:
def __init__(self, name, labels, detection_queue, event, model_shape):
self.labels = labels
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
View File
@ -1,20 +1,14 @@
import datetime
import logging
import os
import queue
import threading
import time
from pathlib import Path
from frigate.config import FrigateConfig, RecordConfig
from frigate.const import CLIPS_DIR
from frigate.models import Event, Recordings
from peewee import fn
@ -39,8 +33,16 @@ class EventProcessor(threading.Thread):
if event_data["false_positive"]: if event_data["false_positive"]:
return False return False
# if there are required zones and there is no overlap record_config: RecordConfig = self.config.cameras[camera].record
required_zones = self.config.cameras[camera].clips.required_zones
# Recording clips is disabled
if not record_config.enabled or (
record_config.retain_days == 0 and not record_config.events.enabled
):
return False
# If there are required zones and there is no overlap
required_zones = record_config.events.required_zones
if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set( if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
required_zones required_zones
): ):
@ -49,174 +51,16 @@ class EventProcessor(threading.Thread):
)
return False
# If the required objects are not present
if (
record_config.events.objects is not None
and event_data["label"] not in record_config.events.objects
):
logger.debug(
f"Not creating clip for {event_data['id']} because it did not contain required objects"
)
return False
return True
def refresh_cache(self):
cached_files = os.listdir(CACHE_DIR)
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split("/")[-1])
except:
continue
for f in cached_files:
if f in files_in_use or f in self.cached_clips:
continue
basename = os.path.splitext(f)[0]
camera, date = basename.rsplit("-", maxsplit=1)
start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(CACHE_DIR, f)}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0:
duration = float(p.stdout.decode().strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(CACHE_DIR, f))
continue
self.cached_clips[f] = {
"path": f,
"camera": camera,
"start_time": start_time.timestamp(),
"duration": duration,
}
if len(self.events_in_process) > 0:
earliest_event = min(
self.events_in_process.values(), key=lambda x: x["start_time"]
)["start_time"]
else:
earliest_event = datetime.datetime.now().timestamp()
# if the earliest event is more tha max seconds ago, cap it
max_seconds = self.config.clips.max_seconds
earliest_event = max(
earliest_event,
datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
)
for f, data in list(self.cached_clips.items()):
if earliest_event - 90 > data["start_time"] + data["duration"]:
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR, f))
# if we are still using more than 90% of the cache, proactively cleanup
cache_usage = shutil.disk_usage("/tmp/cache")
while (
cache_usage.used / cache_usage.total > 0.9
and cache_usage.free < 200000000
and len(self.cached_clips) > 0
):
logger.warning("More than 90% of the cache is used.")
logger.warning(
"Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
)
logger.warning("Proactively cleaning up the cache...")
oldest_clip = min(self.cached_clips.values(), key=lambda x: x["start_time"])
del self.cached_clips[oldest_clip["path"]]
os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
# if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
wait_count = 0
while (
len(sorted_clips) == 0
or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
< event_data["end_time"] + post_capture
):
if wait_count > 4:
logger.warning(
f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
)
return False
logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count += 1
playlist_start = event_data["start_time"] - pre_capture
playlist_end = event_data["end_time"] + post_capture
playlist_lines = []
for clip in sorted_clips:
# clip ends before playlist start time, skip
if clip["start_time"] + clip["duration"] < playlist_start:
continue
# clip starts after playlist ends, finish
if clip["start_time"] > playlist_end:
break
playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
# if this is the starting clip, add an inpoint
if clip["start_time"] < playlist_start:
playlist_lines.append(
f"inpoint {int(playlist_start-clip['start_time'])}"
)
# if this is the ending clip, add an outpoint
if clip["start_time"] + clip["duration"] > playlist_end:
playlist_lines.append(
f"outpoint {int(playlist_end-clip['start_time'])}"
)
clip_name = f"{camera}-{event_data['id']}"
ffmpeg_cmd = [
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
]
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return False
return True
def run(self): def run(self):
@ -224,33 +68,19 @@ class EventProcessor(threading.Thread):
try: try:
event_type, camera, event_data = self.event_queue.get(timeout=10) event_type, camera, event_data = self.event_queue.get(timeout=10)
except queue.Empty: except queue.Empty:
if not self.stop_event.is_set():
self.refresh_cache()
continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
if event_type == "start": if event_type == "start":
self.events_in_process[event_data["id"]] = event_data self.events_in_process[event_data["id"]] = event_data
if event_type == "end": if event_type == "end":
clips_config = self.config.cameras[camera].clips record_config: RecordConfig = self.config.cameras[camera].record
clip_created = False has_clip = self.should_create_clip(camera, event_data)
if self.should_create_clip(camera, event_data):
if clips_config.enabled and (
clips_config.objects is None
or event_data["label"] in clips_config.objects
):
clip_created = self.create_clip(
camera,
event_data,
clips_config.pre_capture,
clips_config.post_capture,
)
if has_clip or event_data["has_snapshot"]:
Event.create(
id=event_data["id"],
label=event_data["label"],
@ -261,11 +91,12 @@ class EventProcessor(threading.Thread):
false_positive=event_data["false_positive"],
zones=list(event_data["entered_zones"]),
thumbnail=event_data["thumbnail"],
has_clip=has_clip,
has_snapshot=event_data["has_snapshot"],
)
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera, has_clip))
logger.info(f"Exiting event processor...")
@ -281,7 +112,7 @@ class EventCleanup(threading.Thread):
def expire(self, media_type):
## Expire events from unlisted cameras based on the global config
if media_type == "clips":
retain_config = self.config.record.events.retain
file_extension = "mp4"
update_params = {"has_clip": False}
else:
@ -332,7 +163,7 @@ class EventCleanup(threading.Thread):
## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items():
if media_type == "clips":
retain_config = camera.record.events.retain
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera
View File
@ -6,11 +6,13 @@ import glob
import logging
import os
import re
import subprocess as sp
import time
from functools import reduce
from pathlib import Path
import cv2
from flask.helpers import send_file
import numpy as np
from flask import (
@ -185,6 +187,7 @@ def event_thumbnail(id):
@bp.route("/events/<id>/snapshot.jpg") @bp.route("/events/<id>/snapshot.jpg")
def event_snapshot(id): def event_snapshot(id):
download = request.args.get("download", type=bool)
jpg_bytes = None jpg_bytes = None
try: try:
event = Event.get(Event.id == id) event = Event.get(Event.id == id)
@ -220,6 +223,45 @@ def event_snapshot(id):
response = make_response(jpg_bytes)
response.headers["Content-Type"] = "image/jpg"
if download:
response.headers[
"Content-Disposition"
] = f"attachment; filename=snapshot-{id}.jpg"
return response
@bp.route("/events/<id>/clip.mp4")
def event_clip(id):
download = request.args.get("download", type=bool)
try:
event: Event = Event.get(Event.id == id)
except DoesNotExist:
return "Event not found.", 404
if not event.has_clip:
return "Clip not available", 404
event_config = current_app.frigate_config.cameras[event.camera].record.events
start_ts = event.start_time - event_config.pre_capture
end_ts = event.end_time + event_config.post_capture
file_name = f"{event.camera}-{id}.mp4"
clip_path = os.path.join(CLIPS_DIR, file_name)
if not os.path.isfile(clip_path):
return recording_clip(event.camera, start_ts, end_ts)
response = make_response()
response.headers["Content-Description"] = "File Transfer"
response.headers["Cache-Control"] = "no-cache"
response.headers["Content-Type"] = "video/mp4"
if download:
response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
response.headers["Content-Length"] = os.path.getsize(clip_path)
response.headers[
"X-Accel-Redirect"
] = f"/clips/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
return response
@ -277,7 +319,16 @@ def events():
@bp.route("/config") @bp.route("/config")
def config(): def config():
return jsonify(current_app.frigate_config.dict()) config = current_app.frigate_config.dict()
# add in the ffmpeg_cmds
for camera_name, camera in current_app.frigate_config.cameras.items():
camera_dict = config["cameras"][camera_name]
camera_dict["ffmpeg_cmds"] = camera.ffmpeg_cmds
for cmd in camera_dict["ffmpeg_cmds"]:
cmd["cmd"] = " ".join(cmd["cmd"])
return jsonify(config)
@bp.route("/config/schema") @bp.route("/config/schema")
@ -508,19 +559,87 @@ def recordings(camera_name):
)
@bp.route("/<camera>/start/<int:start_ts>/end/<int:end_ts>/clip.mp4")
@bp.route("/<camera>/start/<float:start_ts>/end/<float:end_ts>/clip.mp4")
def recording_clip(camera, start_ts, end_ts):
download = request.args.get("download", type=bool)
recordings = (
Recordings.select()
.where(
(Recordings.start_time.between(start_ts, end_ts))
| (Recordings.end_time.between(start_ts, end_ts))
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
)
.where(Recordings.camera == camera)
.order_by(Recordings.start_time.asc())
)
playlist_lines = []
clip: Recordings
for clip in recordings:
playlist_lines.append(f"file '{clip.path}'")
# if this is the starting clip, add an inpoint
if clip.start_time < start_ts:
playlist_lines.append(f"inpoint {int(start_ts - clip.start_time)}")
# if this is the ending clip, add an outpoint
if clip.end_time > end_ts:
playlist_lines.append(f"outpoint {int(end_ts - clip.start_time)}")
file_name = f"clip_{camera}_{start_ts}-{end_ts}.mp4"
path = f"/tmp/cache/{file_name}"
ffmpeg_cmd = [
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
path,
]
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return f"Could not create clip from recordings for {camera}.", 500
response = make_response()
response.headers["Content-Description"] = "File Transfer"
response.headers["Cache-Control"] = "no-cache"
response.headers["Content-Type"] = "video/mp4"
if download:
response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
response.headers["Content-Length"] = os.path.getsize(path)
response.headers[
"X-Accel-Redirect"
] = f"/cache/{file_name}" # nginx: http://wiki.nginx.org/NginxXSendfile
return response
@bp.route("/vod/<camera>/start/<int:start_ts>/end/<int:end_ts>")
@bp.route("/vod/<camera>/start/<float:start_ts>/end/<float:end_ts>")
def vod_ts(camera, start_ts, end_ts):
recordings = (
Recordings.select()
.where(
Recordings.start_time.between(start_ts, end_ts)
| Recordings.end_time.between(start_ts, end_ts)
| ((start_ts > Recordings.start_time) & (end_ts < Recordings.end_time))
)
.where(Recordings.camera == camera)
.order_by(Recordings.start_time.asc())
@ -544,9 +663,13 @@ def vod(year_month, day, hour, camera):
clips.append(clip)
durations.append(duration)
if not clips:
return "No recordings found.", 404
hour_ago = datetime.now() - timedelta(hours=1)
return jsonify(
{
"cache": hour_ago.timestamp() > start_ts,
"discontinuity": False,
"durations": durations,
"sequences": [{"clips": clips}],
@ -554,6 +677,45 @@ def vod(year_month, day, hour, camera):
)
@bp.route("/vod/<year_month>/<day>/<hour>/<camera>")
def vod_hour(year_month, day, hour, camera):
start_date = datetime.strptime(f"{year_month}-{day} {hour}", "%Y-%m-%d %H")
end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
start_ts = start_date.timestamp()
end_ts = end_date.timestamp()
return vod_ts(camera, start_ts, end_ts)
@bp.route("/vod/event/<id>")
def vod_event(id):
try:
event: Event = Event.get(Event.id == id)
except DoesNotExist:
return "Event not found.", 404
if not event.has_clip:
return "Clip not available", 404
event_config = current_app.frigate_config.cameras[event.camera].record.events
start_ts = event.start_time - event_config.pre_capture
end_ts = event.end_time + event_config.post_capture
clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")
if not os.path.isfile(clip_path):
return vod_ts(event.camera, start_ts, end_ts)
duration = int((end_ts - start_ts) * 1000)
return jsonify(
{
"cache": True,
"discontinuity": False,
"durations": [duration],
"sequences": [{"clips": [{"type": "source", "path": clip_path}]}],
}
)
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
View File
@ -13,6 +13,7 @@ from ws4py.server.wsgiutils import WebSocketWSGIApplication
from ws4py.websocket import WebSocket
from frigate.config import FrigateConfig
from frigate.util import restart_frigate
logger = logging.getLogger(__name__)
@ -20,22 +21,22 @@ logger = logging.getLogger(__name__)
def create_mqtt_client(config: FrigateConfig, camera_metrics):
mqtt_config = config.mqtt
def on_recordings_command(client, userdata, message):
payload = message.payload.decode()
logger.debug(f"on_recordings_toggle: {message.topic} {payload}")
camera_name = message.topic.split("/")[-3]
record_settings = config.cameras[camera_name].record
if payload == "ON":
if not record_settings.enabled:
logger.info(f"Turning on recordings for {camera_name} via mqtt")
record_settings.enabled = True
elif payload == "OFF":
if record_settings.enabled:
logger.info(f"Turning off recordings for {camera_name} via mqtt")
record_settings.enabled = False
else:
logger.warning(f"Received unsupported value at {message.topic}: {payload}")
@ -88,6 +89,9 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
state_topic = f"{message.topic[:-4]}/state"
client.publish(state_topic, payload, retain=True)
def on_restart_command(client, userdata, message):
restart_frigate()
def on_connect(client, userdata, flags, rc):
threading.current_thread().name = "mqtt"
if rc != 0:
@ -116,7 +120,7 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
# register callbacks
for name in config.cameras.keys():
client.message_callback_add(
f"{mqtt_config.topic_prefix}/{name}/recordings/set", on_recordings_command
)
client.message_callback_add(
f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command
@ -125,6 +129,10 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command
) )
client.message_callback_add(
f"{mqtt_config.topic_prefix}/restart", on_restart_command
)
if not mqtt_config.tls_ca_certs is None:
if (
not mqtt_config.tls_client_cert is None
@ -151,8 +159,8 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
for name in config.cameras.keys():
client.publish(
f"{mqtt_config.topic_prefix}/{name}/recordings/state",
"ON" if config.cameras[name].record.enabled else "OFF",
retain=True,
)
client.publish(
@ -184,7 +192,7 @@ class MqttSocketRelay:
json_message = json.loads(message.data.decode("utf-8"))
json_message = {
"topic": f"{self.topic_prefix}/{json_message['topic']}",
"payload": json_message.get("payload"),
"retain": json_message.get("retain", False),
}
except Exception as e:
View File
@ -1,5 +1,5 @@
import base64
import copy
import datetime
import hashlib
import itertools
@ -14,30 +14,20 @@ from statistics import mean, median
from typing import Callable, Dict
import cv2
import matplotlib.pyplot as plt
import numpy as np
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.edgetpu import load_labels
from frigate.util import (
SharedMemoryFrameManager,
calculate_region,
draw_box_with_label,
draw_timestamp,
)
logger = logging.getLogger(__name__)
PATH_TO_LABELS = "/labelmap.txt"
LABELS = load_labels(PATH_TO_LABELS)
cmap = plt.cm.get_cmap("tab10", len(LABELS.keys()))
COLOR_MAP = {}
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
def on_edge(box, frame_shape):
if (
@ -72,9 +62,12 @@ def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
class TrackedObject:
def __init__(
self, camera, colormap, camera_config: CameraConfig, frame_cache, obj_data
):
self.obj_data = obj_data
self.camera = camera
self.colormap = colormap
self.camera_config = camera_config
self.frame_cache = frame_cache
self.current_zones = []
@ -107,6 +100,7 @@ class TrackedObject:
def update(self, current_frame_time, obj_data):
significant_update = False
zone_change = False
self.obj_data.update(obj_data)
# if the object is not in the current frame, add a 0.0 to the score history
if self.obj_data["frame_time"] != current_frame_time:
@ -142,6 +136,9 @@ class TrackedObject:
bottom_center = (self.obj_data["centroid"][0], self.obj_data["box"][3])
# check each zone
for name, zone in self.camera_config.zones.items():
# if the zone is not for this object type, skip
if len(zone.objects) > 0 and not self.obj_data["label"] in zone.objects:
continue
contour = zone.contour
# check if the object is in the zone
if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
@ -152,10 +149,10 @@ class TrackedObject:
# if the zones changed, signal an update
if not self.false_positive and set(self.current_zones) != set(current_zones):
zone_change = True
self.current_zones = current_zones
return (significant_update, zone_change)
def to_dict(self, include_thumbnail: bool = False):
snapshot_time = (
@ -243,7 +240,7 @@ class TrackedObject:
if bounding_box:
thickness = 2
color = self.colormap[self.obj_data["label"]]
# draw the bounding boxes on the frame
box = self.thumbnail_data["box"]
@ -318,7 +315,9 @@ def zone_filtered(obj: TrackedObject, object_config):
# Maintains the state of a camera
class CameraState:
def __init__(
self, name, config: FrigateConfig, frame_manager: SharedMemoryFrameManager
):
self.name = name
self.config = config
self.camera_config = config.cameras[name]
@ -351,7 +350,7 @@ class CameraState:
for obj in tracked_objects.values():
if obj["frame_time"] == frame_time:
thickness = 2
color = self.config.model.colormap[obj["label"]]
else:
thickness = 1
color = (255, 0, 0)
@ -392,7 +391,7 @@ class CameraState:
cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
if draw_options.get("mask"):
mask_overlay = np.where(self.camera_config.motion.mask == [0])
frame_copy[mask_overlay] = [0, 0, 0]
if draw_options.get("motion_boxes"):
@ -442,7 +441,11 @@ class CameraState:
for id in new_ids: for id in new_ids:
new_obj = tracked_objects[id] = TrackedObject( new_obj = tracked_objects[id] = TrackedObject(
self.name, self.camera_config, self.frame_cache, current_detections[id] self.name,
self.config.model.colormap,
self.camera_config,
self.frame_cache,
current_detections[id],
) )
# call event handlers # call event handlers
@@ -451,7 +454,9 @@ class CameraState:
         for id in updated_ids:
             updated_obj = tracked_objects[id]
-            significant_update = updated_obj.update(frame_time, current_detections[id])
+            significant_update, zone_change = updated_obj.update(
+                frame_time, current_detections[id]
+            )
             if significant_update:
                 # ensure this frame is stored in the cache
@@ -464,11 +469,12 @@ class CameraState:
             updated_obj.last_updated = frame_time
             # if it has been more than 5 seconds since the last publish
-            # and the last update is greater than the last publish
+            # and the last update is greater than the last publish or
+            # the object has changed zones
             if (
                 frame_time - updated_obj.last_published > 5
                 and updated_obj.last_updated > updated_obj.last_published
-            ):
+            ) or zone_change:
                 # call event handlers
                 for c in self.callbacks["update"]:
                     c(self.name, updated_obj, frame_time)
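
The gate above throttles per-object MQTT updates to one every 5 seconds, but lets a zone change bypass the throttle so automations react immediately. A minimal standalone sketch of that gating logic, with made-up timestamps:

    PUBLISH_INTERVAL = 5  # seconds between regular updates

    def should_publish(frame_time, last_published, last_updated, zone_change):
        # regular path: enough time has passed AND there is something new to report
        throttled_ok = (
            frame_time - last_published > PUBLISH_INTERVAL
            and last_updated > last_published
        )
        # zone changes always publish immediately
        return throttled_ok or zone_change

    print(should_publish(10.0, 8.0, 9.5, False))  # False: only 2s since last publish
    print(should_publish(10.0, 8.0, 9.5, True))   # True: zone change bypasses throttle
    print(should_publish(14.0, 8.0, 9.5, False))  # True: 6s elapsed and a newer update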

View File

@@ -159,9 +159,16 @@ class BirdsEyeFrameManager:
             frame = None
             channel_dims = None
         else:
+            try:
                 frame = self.frame_manager.get(
                     f"{camera}{frame_time}", self.config.cameras[camera].frame_shape_yuv
                 )
+            except FileNotFoundError:
+                # TODO: better frame management would prevent this edge case
+                logger.warning(
+                    f"Unable to copy frame {camera}{frame_time} to birdseye."
+                )
+                return
             channel_dims = self.cameras[camera]["channel_dims"]
         copy_yuv_to_position(
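
The FileNotFoundError guarded against here comes from the shared-memory layer: if the producer has already unlinked a frame's segment, attaching to it by name fails. A standalone sketch of that failure mode using Python's multiprocessing.shared_memory (names and shapes are illustrative, not Frigate's internals):

    from multiprocessing import shared_memory

    import numpy as np

    shape = (1080 * 3 // 2, 1920)  # a YUV420 frame is height * 1.5 rows of bytes

    # producer side: create a named segment holding one frame
    shm = shared_memory.SharedMemory(name="front.1629800000", create=True, size=int(np.prod(shape)))
    np.ndarray(shape, dtype=np.uint8, buffer=shm.buf).fill(0)
    shm.close()
    shm.unlink()  # producer expires the frame

    # consumer side: attaching by name after unlink fails, hence the guard above
    try:
        existing = shared_memory.SharedMemory(name="front.1629800000")
    except FileNotFoundError:
        print("frame expired before it could be copied to birdseye")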
@@ -346,10 +353,14 @@ def output_frames(config: FrigateConfig, video_output_queue):
     broadcasters = {}
     for camera, cam_config in config.cameras.items():
+        width = int(
+            cam_config.live.height
+            * (cam_config.frame_shape[1] / cam_config.frame_shape[0])
+        )
         converters[camera] = FFMpegConverter(
             cam_config.frame_shape[1],
             cam_config.frame_shape[0],
-            cam_config.live.width,
+            width,
             cam_config.live.height,
             cam_config.live.quality,
         )
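
Deriving the live-view width from the configured height preserves the camera's aspect ratio instead of trusting a separately configured width. With frame_shape stored as (height, width), the arithmetic looks like this (values are an example):

    frame_shape = (1080, 1920)  # (height, width) as Frigate stores it
    live_height = 720

    # width = height * aspect ratio, truncated to an integer pixel count
    live_width = int(live_height * (frame_shape[1] / frame_shape[0]))
    print(live_width)  # 1280 for a 16:9 source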

View File

@@ -14,7 +14,7 @@ import numpy as np
 from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
 from frigate.edgetpu import LocalObjectDetector
 from frigate.motion import MotionDetector
-from frigate.object_processing import COLOR_MAP, CameraState
+from frigate.object_processing import CameraState
 from frigate.objects import ObjectTracker
 from frigate.util import (
     DictFrameManager,

View File

@@ -3,6 +3,7 @@ import itertools
 import logging
 import os
 import random
+import shutil
 import string
 import subprocess as sp
 import threading
@@ -10,9 +11,11 @@ from pathlib import Path
 import psutil
+from peewee import JOIN
 from frigate.config import FrigateConfig
-from frigate.const import RECORD_DIR
-from frigate.models import Recordings
+from frigate.const import CACHE_DIR, RECORD_DIR
+from frigate.models import Event, Recordings
 logger = logging.getLogger(__name__)
@@ -45,8 +48,10 @@ class RecordingMaintainer(threading.Thread):
     def move_files(self):
         recordings = [
             d
-            for d in os.listdir(RECORD_DIR)
-            if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")
+            for d in os.listdir(CACHE_DIR)
+            if os.path.isfile(os.path.join(CACHE_DIR, d))
+            and d.endswith(".mp4")
+            and not d.startswith("clip_")
         ]
         files_in_use = []
@@ -57,19 +62,26 @@ class RecordingMaintainer(threading.Thread):
                 flist = process.open_files()
                 if flist:
                     for nt in flist:
-                        if nt.path.startswith(RECORD_DIR):
+                        if nt.path.startswith(CACHE_DIR):
                             files_in_use.append(nt.path.split("/")[-1])
             except:
                 continue
         for f in recordings:
+            # Skip files currently in use
             if f in files_in_use:
                 continue
+            cache_path = os.path.join(CACHE_DIR, f)
             basename = os.path.splitext(f)[0]
             camera, date = basename.rsplit("-", maxsplit=1)
             start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
+            # Just delete files if recordings are turned off
+            if not self.config.cameras[camera].record.enabled:
+                Path(cache_path).unlink(missing_ok=True)
+                continue
             ffprobe_cmd = [
                 "ffprobe",
                 "-v",
@@ -78,7 +90,7 @@ class RecordingMaintainer(threading.Thread):
                 "format=duration",
                 "-of",
                 "default=noprint_wrappers=1:nokey=1",
-                f"{os.path.join(RECORD_DIR, f)}",
+                f"{cache_path}",
             ]
             p = sp.run(ffprobe_cmd, capture_output=True)
             if p.returncode == 0:
@@ -86,7 +98,7 @@ class RecordingMaintainer(threading.Thread):
                 end_time = start_time + datetime.timedelta(seconds=duration)
             else:
                 logger.info(f"bad file: {f}")
-                os.remove(os.path.join(RECORD_DIR, f))
+                Path(cache_path).unlink(missing_ok=True)
                 continue
             directory = os.path.join(
@@ -99,7 +111,9 @@ class RecordingMaintainer(threading.Thread):
             file_name = f"{start_time.strftime('%M.%S.mp4')}"
             file_path = os.path.join(directory, file_name)
-            os.rename(os.path.join(RECORD_DIR, f), file_path)
+            # copy then delete is required when recordings are stored on some network drives
+            shutil.copyfile(cache_path, file_path)
+            os.remove(cache_path)
             rand_id = "".join(
                 random.choices(string.ascii_lowercase + string.digits, k=6)
@@ -113,30 +127,166 @@ class RecordingMaintainer(threading.Thread):
                 duration=duration,
             )
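
The copy-then-delete above replaces os.rename because rename only works within a single filesystem, and the cache (often tmpfs) usually sits on a different device than a recordings share. A defensive sketch of the same move (paths are examples; shutil.move would also cover both cases):

    import os
    import shutil

    cache_path = "/tmp/cache/front-20210824190000.mp4"   # example source on tmpfs
    file_path = "/media/frigate/recordings/00.00.mp4"    # example destination on a network share

    try:
        # fast path: source and destination on the same filesystem
        os.rename(cache_path, file_path)
    except OSError:
        # cross-device (EXDEV) or SMB/NFS quirk: fall back to copy then delete
        shutil.copyfile(cache_path, file_path)
        os.remove(cache_path)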
+    def run(self):
+        # Check for new files every 5 seconds
+        while not self.stop_event.wait(5):
+            self.move_files()
+        logger.info(f"Exiting recording maintenance...")
+class RecordingCleanup(threading.Thread):
+    def __init__(self, config: FrigateConfig, stop_event):
+        threading.Thread.__init__(self)
+        self.name = "recording_cleanup"
+        self.config = config
+        self.stop_event = stop_event
+    def clean_tmp_clips(self):
+        # delete any tmp clips more than a minute old
+        for p in Path("/tmp/cache").rglob("clip_*.mp4"):
+            logger.debug(f"Checking tmp clip {p}.")
+            if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
+                logger.debug("Deleting tmp clip.")
+                p.unlink(missing_ok=True)
+    def expire_recordings(self):
+        logger.debug("Start expire recordings (new).")
+        logger.debug("Start deleted cameras.")
+        # Handle deleted cameras
+        no_camera_recordings: Recordings = Recordings.select().where(
+            Recordings.camera.not_in(list(self.config.cameras.keys())),
+        )
+        for recording in no_camera_recordings:
+            expire_days = self.config.record.retain_days
+            expire_before = (
+                datetime.datetime.now() - datetime.timedelta(days=expire_days)
+            ).timestamp()
+            if recording.end_time < expire_before:
+                Path(recording.path).unlink(missing_ok=True)
+                Recordings.delete_by_id(recording.id)
+        logger.debug("End deleted cameras.")
logger.debug("Start all cameras.")
for camera, config in self.config.cameras.items():
logger.debug(f"Start camera: {camera}.")
# When deleting recordings without events, we have to keep at LEAST the configured max clip duration
min_end = (
datetime.datetime.now()
- datetime.timedelta(seconds=config.record.events.max_seconds)
).timestamp()
expire_days = config.record.retain_days
expire_before = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
expire_date = min(min_end, expire_before)
# Get recordings to remove
recordings: Recordings = Recordings.select().where(
Recordings.camera == camera,
Recordings.end_time < expire_date,
)
+            for recording in recordings:
+                # See if there are any associated events
+                events: Event = Event.select().where(
+                    Event.camera == recording.camera,
+                    (
+                        Event.start_time.between(
+                            recording.start_time, recording.end_time
+                        )
+                        | Event.end_time.between(
+                            recording.start_time, recording.end_time
+                        )
+                        | (
+                            (recording.start_time > Event.start_time)
+                            & (recording.end_time < Event.end_time)
+                        )
+                    ),
+                )
+                keep = False
+                event_ids = set()
+                event: Event
+                for event in events:
+                    event_ids.add(event.id)
+                    # Check event/label retention and keep the recording if within window
+                    expire_days_event = (
+                        0
+                        if not config.record.events.enabled
+                        else config.record.events.retain.objects.get(
+                            event.label, config.record.events.retain.default
+                        )
+                    )
+                    expire_before_event = (
+                        datetime.datetime.now()
+                        - datetime.timedelta(days=expire_days_event)
+                    ).timestamp()
+                    if recording.end_time >= expire_before_event:
+                        keep = True
+                # Delete recordings outside of the retention window
+                if not keep:
+                    Path(recording.path).unlink(missing_ok=True)
+                    Recordings.delete_by_id(recording.id)
+                    if event_ids:
+                        # Update associated events
+                        Event.update(has_clip=False).where(
+                            Event.id.in_(list(event_ids))
+                        ).execute()
+            logger.debug(f"End camera: {camera}.")
+        logger.debug("End all cameras.")
+        logger.debug("End expire recordings (new).")
     def expire_files(self):
+        logger.debug("Start expire files (legacy).")
+        shortest_retention = self.config.record.retain_days
+        default_expire = (
+            datetime.datetime.now().timestamp()
+            - SECONDS_IN_DAY * self.config.record.retain_days
+        )
         delete_before = {}
         for name, camera in self.config.cameras.items():
             delete_before[name] = (
                 datetime.datetime.now().timestamp()
                 - SECONDS_IN_DAY * camera.record.retain_days
             )
+            if camera.record.retain_days < shortest_retention:
+                shortest_retention = camera.record.retain_days
-        for p in Path("/media/frigate/recordings").rglob("*.mp4"):
-            if not p.parent.name in delete_before:
+        logger.debug(f"Shortest retention: {shortest_retention}")
+        process = sp.run(
+            ["find", RECORD_DIR, "-type", "f", "-mtime", f"+{shortest_retention}"],
+            capture_output=True,
+            text=True,
+        )
+        files_to_check = process.stdout.splitlines()
+        for f in files_to_check:
+            p = Path(f)
+            # Ignore files that have a record in the recordings DB
+            if Recordings.select().where(Recordings.path == str(p)).count():
                 continue
-            if p.stat().st_mtime < delete_before[p.parent.name]:
+            if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
+                Recordings.delete().where(Recordings.path == str(p)).execute()
                 p.unlink(missing_ok=True)
+        logger.debug("End expire files (legacy).")
     def run(self):
+        # Expire recordings every minute, clean directories every hour.
         for counter in itertools.cycle(range(60)):
-            if self.stop_event.wait(10):
-                logger.info(f"Exiting recording maintenance...")
+            if self.stop_event.wait(60):
+                logger.info(f"Exiting recording cleanup...")
                 break
-            # only expire events every 10 minutes, but check for new files every 10 seconds
+            self.expire_recordings()
+            self.clean_tmp_clips()
             if counter == 0:
                 self.expire_files()
                 remove_empty_directories(RECORD_DIR)
-            self.move_files()

View File

@@ -18,8 +18,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -42,8 +45,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -60,8 +66,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -82,8 +91,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "objects": {"track": ["cat"]},
             }
         },
@@ -105,8 +117,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -130,8 +145,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -152,8 +170,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "objects": {
                     "track": ["person", "dog"],
                     "filters": {"dog": {"threshold": 0.7}},
@@ -179,8 +200,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "objects": {
                     "mask": "0,0,1,1,0,1",
                     "filters": {"dog": {"mask": "1,1,1,1,1,1"}},
@@ -197,6 +221,34 @@ class TestConfig(unittest.TestCase):
         assert len(back_camera.objects.filters["dog"].raw_mask) == 2
         assert len(back_camera.objects.filters["person"].raw_mask) == 1
+    def test_default_input_args(self):
+        config = {
+            "mqtt": {"host": "mqtt"},
+            "cameras": {
+                "back": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {
+                                "path": "rtsp://10.0.0.1:554/video",
+                                "roles": ["detect"],
+                            },
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                }
+            },
+        }
+        frigate_config = FrigateConfig(**config)
+        assert config == frigate_config.dict(exclude_unset=True)
+        runtime_config = frigate_config.runtime_config
+        assert "-rtsp_transport" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
     def test_ffmpeg_params_global(self):
         config = {
             "ffmpeg": {"input_args": "-re"},
@@ -208,8 +260,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "objects": {
                     "track": ["person", "dog"],
                     "filters": {"dog": {"threshold": 0.7}},
@@ -235,8 +290,11 @@ class TestConfig(unittest.TestCase):
                     ],
                     "input_args": ["-re"],
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "objects": {
                     "track": ["person", "dog"],
                     "filters": {"dog": {"threshold": 0.7}},
@@ -267,8 +325,11 @@ class TestConfig(unittest.TestCase):
                     ],
                     "input_args": "test3",
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "objects": {
                     "track": ["person", "dog"],
                     "filters": {"dog": {"threshold": 0.7}},
@@ -288,7 +349,9 @@ class TestConfig(unittest.TestCase):
     def test_inherit_clips_retention(self):
         config = {
             "mqtt": {"host": "mqtt"},
-            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
+            "record": {
+                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+            },
             "cameras": {
                 "back": {
                     "ffmpeg": {
@@ -296,8 +359,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -305,12 +371,16 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)
         runtime_config = frigate_config.runtime_config
-        assert runtime_config.cameras["back"].clips.retain.objects["person"] == 30
+        assert (
+            runtime_config.cameras["back"].record.events.retain.objects["person"] == 30
+        )
     def test_roles_listed_twice_throws_error(self):
         config = {
             "mqtt": {"host": "mqtt"},
-            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
+            "record": {
+                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+            },
             "cameras": {
                 "back": {
                     "ffmpeg": {
@@ -319,8 +389,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video2", "roles": ["detect"]},
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -329,7 +402,9 @@ class TestConfig(unittest.TestCase):
     def test_zone_matching_camera_name_throws_error(self):
         config = {
             "mqtt": {"host": "mqtt"},
-            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
+            "record": {
+                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+            },
             "cameras": {
                 "back": {
                     "ffmpeg": {
@@ -337,8 +412,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "zones": {"back": {"coordinates": "1,1,1,1,1,1"}},
             }
         },
@@ -348,7 +426,9 @@ class TestConfig(unittest.TestCase):
     def test_zone_assigns_color_and_contour(self):
         config = {
             "mqtt": {"host": "mqtt"},
-            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
+            "record": {
+                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+            },
             "cameras": {
                 "back": {
                     "ffmpeg": {
@@ -356,8 +436,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
                 "zones": {"test": {"coordinates": "1,1,1,1,1,1"}},
             }
         },
@@ -374,7 +457,9 @@ class TestConfig(unittest.TestCase):
     def test_clips_should_default_to_global_objects(self):
         config = {
             "mqtt": {"host": "mqtt"},
-            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
+            "record": {
+                "events": {"retain": {"default": 20, "objects": {"person": 30}}}
+            },
             "objects": {"track": ["person", "dog"]},
             "cameras": {
                 "back": {
@@ -383,9 +468,12 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
-                    "clips": {"enabled": True},
+                    "fps": 5,
+                },
+                "record": {"events": {"enabled": True}},
             }
         },
     }
@@ -394,8 +482,8 @@ class TestConfig(unittest.TestCase):
         runtime_config = frigate_config.runtime_config
         back_camera = runtime_config.cameras["back"]
-        assert back_camera.clips.objects is None
-        assert back_camera.clips.retain.objects["person"] == 30
+        assert back_camera.record.events.objects is None
+        assert back_camera.record.events.retain.objects["person"] == 30
     def test_role_assigned_but_not_enabled(self):
         config = {
@@ -411,8 +499,11 @@ class TestConfig(unittest.TestCase):
                         {"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]},
                     ]
                 },
+                "detect": {
                     "height": 1080,
                     "width": 1920,
+                    "fps": 5,
+                },
             }
         },
     }
@@ -438,9 +529,12 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "enabled": True,
                 "height": 1080,
                 "width": 1920,
-                "detect": {"enabled": True},
+                "fps": 5,
+            },
         }
     },
 }
@@ -465,8 +559,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
-            "height": 480,
-            "width": 640,
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -491,8 +588,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
                 "height": 1080,
                 "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -501,7 +601,96 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)
         runtime_config = frigate_config.runtime_config
-        assert round(runtime_config.cameras["back"].motion.contour_area) == 225
+        assert round(runtime_config.cameras["back"].motion.contour_area) == 99
+    def test_merge_labelmap(self):
+        config = {
+            "mqtt": {"host": "mqtt"},
+            "model": {"labelmap": {7: "truck"}},
+            "cameras": {
+                "back": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {
+                                "path": "rtsp://10.0.0.1:554/video",
+                                "roles": ["detect"],
+                            },
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                }
+            },
+        }
+        frigate_config = FrigateConfig(**config)
+        assert config == frigate_config.dict(exclude_unset=True)
+        runtime_config = frigate_config.runtime_config
+        assert runtime_config.model.merged_labelmap[7] == "truck"
+    def test_default_labelmap_empty(self):
+        config = {
+            "mqtt": {"host": "mqtt"},
+            "cameras": {
+                "back": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {
+                                "path": "rtsp://10.0.0.1:554/video",
+                                "roles": ["detect"],
+                            },
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                }
+            },
+        }
+        frigate_config = FrigateConfig(**config)
+        assert config == frigate_config.dict(exclude_unset=True)
+        runtime_config = frigate_config.runtime_config
+        assert runtime_config.model.merged_labelmap[0] == "person"
+    def test_default_labelmap(self):
+        config = {
+            "mqtt": {"host": "mqtt"},
+            "model": {"width": 320, "height": 320},
+            "cameras": {
+                "back": {
+                    "ffmpeg": {
+                        "inputs": [
+                            {
+                                "path": "rtsp://10.0.0.1:554/video",
+                                "roles": ["detect"],
+                            },
+                        ]
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
+                }
+            },
+        }
+        frigate_config = FrigateConfig(**config)
+        assert config == frigate_config.dict(exclude_unset=True)
+        runtime_config = frigate_config.runtime_config
+        assert runtime_config.model.merged_labelmap[0] == "person"
 if __name__ == "__main__":
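
For intuition on what these tests assert: merged_labelmap presumably starts from the built-in COCO-style labelmap and lays any user-supplied labelmap entries over it, so overrides win and everything else falls through to the defaults. A minimal sketch of that merge semantics (the default dict here is truncated and illustrative):

    DEFAULT_LABELMAP = {0: "person", 1: "bicycle", 2: "car", 7: "truck"}  # truncated

    def merged_labelmap(overrides=None):
        # user entries win; everything else falls through to the defaults
        merged = dict(DEFAULT_LABELMAP)
        merged.update(overrides or {})
        return merged

    print(merged_labelmap({7: "truck"})[7])  # "truck", as test_merge_labelmap asserts
    print(merged_labelmap()[0])              # "person" even with no model config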

View File

@@ -17,6 +17,7 @@ from typing import AnyStr
 import cv2
 import matplotlib.pyplot as plt
 import numpy as np
+import os
 logger = logging.getLogger(__name__)
@@ -518,6 +519,10 @@ def clipped(obj, frame_shape):
     return False
+def restart_frigate():
+    os.kill(os.getpid(), signal.SIGTERM)
 class EventsPerSecond:
     def __init__(self, max_events=1000):
         self._start = None
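
restart_frigate simply sends SIGTERM to its own process; the container's supervisor is then expected to observe the exit and start Frigate again. A toy sketch of that contract, assuming a hypothetical handler that performs the graceful-shutdown work:

    import os
    import signal

    def on_sigterm(signum, frame):
        # in a real service this is where threads are stopped and state is flushed
        print("SIGTERM received, exiting so the supervisor can restart us")
        raise SystemExit(0)

    signal.signal(signal.SIGTERM, on_sigterm)
    os.kill(os.getpid(), signal.SIGTERM)  # the same mechanism restart_frigate() uses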

View File

@@ -216,6 +216,13 @@ class CameraWatchdog(threading.Thread):
         now = datetime.datetime.now().timestamp()
         if not self.capture_thread.is_alive():
+            self.logger.error(
+                f"FFMPEG process crashed unexpectedly for {self.camera_name}."
+            )
+            self.logger.error(
+                "The following ffmpeg logs include the last 100 lines prior to exit."
+            )
+            self.logger.error("You may have invalid args defined for this camera.")
             self.logpipe.dump()
             self.start_ffmpeg_detect()
         elif now - self.capture_thread.current_frame.value > 20:
@@ -318,6 +325,7 @@ def track_camera(
     name,
     config: CameraConfig,
     model_shape,
+    labelmap,
     detection_queue,
     result_connection,
     detected_objects_queue,
@@ -344,7 +352,7 @@ def track_camera(
     motion_detector = MotionDetector(frame_shape, config.motion)
     object_detector = RemoteObjectDetector(
-        name, "/labelmap.txt", detection_queue, result_connection, model_shape
+        name, labelmap, detection_queue, result_connection, model_shape
     )
     object_tracker = ObjectTracker(config.detect)

View File

@@ -23,7 +23,7 @@ export default function App() {
   ) : (
     <div className="flex flex-row min-h-screen w-full bg-white dark:bg-gray-900 text-gray-900 dark:text-white">
       <Sidebar />
-      <div className="w-full flex-auto p-2 mt-24 px-4 min-w-0">
+      <div className="w-full flex-auto p-2 mt-16 px-4 min-w-0">
         <Router>
           <AsyncRoute path="/cameras/:camera/editor" getComponent={Routes.getCameraMap} />
           <AsyncRoute path="/cameras/:camera" getComponent={Routes.getCamera} />

View File

@@ -5,12 +5,18 @@ import Menu, { MenuItem, MenuSeparator } from './components/Menu';
 import AutoAwesomeIcon from './icons/AutoAwesome';
 import LightModeIcon from './icons/LightMode';
 import DarkModeIcon from './icons/DarkMode';
+import FrigateRestartIcon from './icons/FrigateRestart';
+import Dialog from './components/Dialog';
 import { useDarkMode } from './context';
 import { useCallback, useRef, useState } from 'preact/hooks';
+import { useRestart } from './api/mqtt';
 export default function AppBar() {
   const [showMoreMenu, setShowMoreMenu] = useState(false);
+  const [showDialog, setShowDialog] = useState(false);
+  const [showDialogWait, setShowDialogWait] = useState(false);
   const { setDarkMode } = useDarkMode();
+  const { send: sendRestart } = useRestart();
   const handleSelectDarkMode = useCallback(
     (value, label) => {
@@ -30,6 +36,21 @@ export default function AppBar() {
     setShowMoreMenu(false);
   }, [setShowMoreMenu]);
+  const handleClickRestartDialog = useCallback(() => {
+    setShowDialog(false);
+    setShowDialogWait(true);
+    sendRestart();
+  }, [setShowDialog]); // eslint-disable-line react-hooks/exhaustive-deps
+  const handleDismissRestartDialog = useCallback(() => {
+    setShowDialog(false);
+  }, [setShowDialog]);
+  const handleRestart = useCallback(() => {
+    setShowMoreMenu(false);
+    setShowDialog(true);
+  }, [setShowDialog]);
   return (
     <Fragment>
       <BaseAppBar title={LinkedLogo} overflowRef={moreRef} onOverflowClick={handleShowMenu} />
@@ -39,8 +60,27 @@ export default function AppBar() {
           <MenuSeparator />
           <MenuItem icon={LightModeIcon} label="Light" value="light" onSelect={handleSelectDarkMode} />
           <MenuItem icon={DarkModeIcon} label="Dark" value="dark" onSelect={handleSelectDarkMode} />
+          <MenuSeparator />
+          <MenuItem icon={FrigateRestartIcon} label="Restart Frigate" onSelect={handleRestart} />
         </Menu>
       ) : null}
+      {showDialog ? (
+        <Dialog
+          onDismiss={handleDismissRestartDialog}
+          title="Restart Frigate"
+          text="Are you sure?"
+          actions={[
+            { text: 'Yes', color: 'red', onClick: handleClickRestartDialog },
+            { text: 'Cancel', onClick: handleDismissRestartDialog },
+          ]}
+        />
+      ) : null}
+      {showDialogWait ? (
+        <Dialog
+          title="Restart in progress"
+          text="Please wait a few seconds for the restart to complete before reloading the page."
+        />
+      ) : null}
     </Fragment>
   );
 }

View File

@@ -107,12 +107,12 @@ describe('MqttProvider', () => {
     );
   });
-  test('prefills the clips/detect/snapshots state from config', async () => {
+  test('prefills the recordings/detect/snapshots state from config', async () => {
     jest.spyOn(Date, 'now').mockReturnValue(123456);
     const config = {
       cameras: {
-        front: { name: 'front', detect: { enabled: true }, clips: { enabled: false }, snapshots: { enabled: true } },
-        side: { name: 'side', detect: { enabled: false }, clips: { enabled: false }, snapshots: { enabled: false } },
+        front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true } },
+        side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false } },
       },
     };
     render(
@@ -122,10 +122,10 @@ describe('MqttProvider', () => {
     );
     await screen.findByTestId('data');
     expect(screen.getByTestId('front/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
-    expect(screen.getByTestId('front/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+    expect(screen.getByTestId('front/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
     expect(screen.getByTestId('front/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"ON"}');
     expect(screen.getByTestId('side/detect/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
-    expect(screen.getByTestId('side/clips/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
+    expect(screen.getByTestId('side/recordings/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
     expect(screen.getByTestId('side/snapshots/state')).toHaveTextContent('{"lastUpdate":123456,"payload":"OFF"}');
   });
 });

View File

@@ -41,8 +41,8 @@ export function MqttProvider({
   useEffect(() => {
     Object.keys(config.cameras).forEach((camera) => {
-      const { name, clips, detect, snapshots } = config.cameras[camera];
-      dispatch({ topic: `${name}/clips/state`, payload: clips.enabled ? 'ON' : 'OFF' });
+      const { name, record, detect, snapshots } = config.cameras[camera];
+      dispatch({ topic: `${name}/recordings/state`, payload: record.enabled ? 'ON' : 'OFF' });
       dispatch({ topic: `${name}/detect/state`, payload: detect.enabled ? 'ON' : 'OFF' });
       dispatch({ topic: `${name}/snapshots/state`, payload: snapshots.enabled ? 'ON' : 'OFF' });
     });
@@ -101,12 +101,12 @@ export function useDetectState(camera) {
   return { payload, send, connected };
 }
-export function useClipsState(camera) {
+export function useRecordingsState(camera) {
   const {
     value: { payload },
     send,
     connected,
-  } = useMqtt(`${camera}/clips/state`, `${camera}/clips/set`);
+  } = useMqtt(`${camera}/recordings/state`, `${camera}/recordings/set`);
   return { payload, send, connected };
 }
@@ -118,3 +118,12 @@ export function useSnapshotsState(camera) {
   } = useMqtt(`${camera}/snapshots/state`, `${camera}/snapshots/set`);
   return { payload, send, connected };
 }
+export function useRestart() {
+  const {
+    value: { payload },
+    send,
+    connected,
+  } = useMqtt('restart', 'restart');
+  return { payload, send, connected };
+}
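
These hooks ultimately publish plain MQTT messages on the broker, so the same toggles work from any client. A hedged sketch with paho-mqtt, assuming the default `frigate` topic prefix and a broker on localhost:

    import paho.mqtt.client as mqtt

    client = mqtt.Client()
    client.connect("localhost", 1883)

    # turn recordings on for the camera named "front" (mirrors the recordings toggle)
    client.publish("frigate/front/recordings/set", "ON")

    # ask Frigate to restart itself (mirrors the new Restart Frigate menu item)
    client.publish("frigate/restart")

    client.disconnect()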

View File

@@ -37,13 +37,13 @@ export default function AppBar({ title: Title, overflowRef, onOverflowClick }) {
   return (
     <div
-      className={`w-full border-b border-gray-200 dark:border-gray-700 flex items-center align-middle p-4 space-x-2 fixed left-0 right-0 z-10 bg-white dark:bg-gray-900 transform transition-all duration-200 ${
+      className={`w-full border-b border-gray-200 dark:border-gray-700 flex items-center align-middle p-2 fixed left-0 right-0 z-10 bg-white dark:bg-gray-900 transform transition-all duration-200 ${
         !show ? '-translate-y-full' : 'translate-y-0'
       } ${!atZero ? 'shadow-sm' : ''}`}
       data-testid="appbar"
     >
       <div className="lg:hidden">
-        <Button color="black" className="rounded-full w-12 h-12" onClick={handleShowDrawer} type="text">
+        <Button color="black" className="rounded-full w-10 h-10" onClick={handleShowDrawer} type="text">
           <MenuIcon className="w-10 h-10" />
         </Button>
       </div>
@@ -54,7 +54,7 @@ export default function AppBar({ title: Title, overflowRef, onOverflowClick }) {
       <Button
         aria-label="More options"
         color="black"
-        className="rounded-full w-12 h-12"
+        className="rounded-full w-9 h-9"
         onClick={onOverflowClick}
         type="text"
       >

View File

@@ -0,0 +1,37 @@
+import { h } from 'preact';
+import { useCallback, useState } from 'preact/hooks';
+export default function ButtonsTabbed({
+  viewModes = [''],
+  setViewMode = null,
+  setHeader = null,
+  headers = [''],
+  className = 'text-gray-600 py-0 px-4 block hover:text-gray-500',
+  selectedClassName = `${className} focus:outline-none border-b-2 font-medium border-gray-500`
+}) {
+  const [selected, setSelected] = useState(0);
+  const captitalize = (str) => { return (`${str.charAt(0).toUpperCase()}${str.slice(1)}`); };
+  const getHeader = useCallback((i) => {
+    return (headers.length === viewModes.length ? headers[i] : captitalize(viewModes[i]));
+  }, [headers, viewModes]);
+  const handleClick = useCallback((i) => {
+    setSelected(i);
+    setViewMode && setViewMode(viewModes[i]);
+    setHeader && setHeader(getHeader(i));
+  }, [setViewMode, setHeader, setSelected, viewModes, getHeader]);
+  setHeader && setHeader(getHeader(selected));
+  return (
+    <nav className="flex justify-end">
+      {viewModes.map((item, i) => {
+        return (
+          <button onClick={() => handleClick(i)} className={i === selected ? selectedClassName : className}>
+            {captitalize(item)}
+          </button>
+        );
+      })}
+    </nav>
+  );
+}

View File

@@ -12,7 +12,8 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
   const canvasRef = useRef(null);
   const [{ width: availableWidth }] = useResizeObserver(containerRef);
-  const { name, width, height } = config.cameras[camera];
+  const { name } = config.cameras[camera];
+  const { width, height } = config.cameras[camera].detect;
   const aspectRatio = width / height;
   const scaledHeight = useMemo(() => {

View File

@@ -12,7 +12,7 @@ export default function JSMpegPlayer({ camera }) {
     playerRef.current,
     url,
     {},
-    {protocols: [], audio: false}
+    {protocols: [], audio: false, videoBufferSize: 1024*1024*4}
   );
   const fullscreen = () => {

View File

@@ -22,7 +22,7 @@ export default function NavigationDrawer({ children, header }) {
       onClick={handleDismiss}
     >
       {header ? (
-        <div className="flex-shrink-0 p-5 flex flex-row items-center justify-between border-b border-gray-200 dark:border-gray-700">
+        <div className="flex-shrink-0 p-2 flex flex-row items-center justify-between border-b border-gray-200 dark:border-gray-700">
          {header}
        </div>
      ) : null}

View File

@@ -7,7 +7,7 @@ import { render, screen } from '@testing-library/preact';
 describe('CameraImage', () => {
   beforeEach(() => {
     jest.spyOn(Api, 'useConfig').mockImplementation(() => {
-      return { data: { cameras: { front: { name: 'front', width: 1280, height: 720 } } } };
+      return { data: { cameras: { front: { name: 'front', detect: { width: 1280, height: 720 } } } } };
     });
     jest.spyOn(Api, 'useApiHost').mockReturnValue('http://base-url.local:5000');
     jest.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 0 }]);

View File

@@ -0,0 +1,13 @@
+import { h } from 'preact';
+import { memo } from 'preact/compat';
+export function FrigateRestart({ className = '' }) {
+  return (
+    <svg className={`fill-current ${className}`} viewBox="0 0 24 24">
+      <rect fill="none" height="24" width="24" />
+      <path d="M12 6v3l4-4-4-4v3c-4.42 0-8 3.58-8 8 0 1.57.46 3.03 1.24 4.26L6.7 14.8c-.45-.83-.7-1.79-.7-2.8 0-3.31 2.69-6 6-6zm6.76 1.74L17.3 9.2c.44.84.7 1.79.7 2.8 0 3.31-2.69 6-6 6v-3l-4 4 4 4v-3c4.42 0 8-3.58 8-8 0-1.57-.46-3.03-1.24-4.26z" />
+    </svg>
+  );
+}
+export default memo(FrigateRestart);

View File

@@ -7,6 +7,7 @@ import Heading from '../components/Heading';
 import Link from '../components/Link';
 import SettingsIcon from '../icons/Settings';
 import Switch from '../components/Switch';
+import ButtonsTabbed from '../components/ButtonsTabbed';
 import { usePersistence } from '../context';
 import { useCallback, useMemo, useState } from 'preact/hooks';
 import { useApiHost, useConfig } from '../api';
@@ -112,16 +113,7 @@ export default function Camera({ camera }) {
   return (
     <div className="space-y-4">
       <Heading size="2xl">{camera}</Heading>
-      <div>
-        <nav className="flex justify-end">
-          <button onClick={() => setViewMode('live')} className={viewMode === 'live' ? 'text-gray-600 py-0 px-4 block hover:text-gray-500 focus:outline-none border-b-2 font-medium border-gray-500' : 'text-gray-600 py-0 px-4 block hover:text-gray-500'}>
-            Live
-          </button>
-          <button onClick={() => setViewMode('debug')} className={viewMode === 'debug' ? 'text-gray-600 py-0 px-4 block hover:text-gray-500 focus:outline-none border-b-2 font-medium border-gray-500' : 'text-gray-600 py-0 px-4 block hover:text-gray-500'}>
-            Debug
-          </button>
-        </nav>
-      </div>
+      <ButtonsTabbed viewModes={['live', 'debug']} setViewMode={setViewMode} />
       {player}

View File

@@ -15,13 +15,16 @@ export default function CameraMasks({ camera, url }) {
   const cameraConfig = config.cameras[camera];
   const {
-    width,
-    height,
     motion: { mask: motionMask },
     objects: { filters: objectFilters },
     zones,
   } = cameraConfig;
+  const {
+    width,
+    height,
+  } = cameraConfig.detect;
   const [{ width: scaledWidth }] = useResizeObserver(imageRef);
   const imageScale = scaledWidth / width;

View File

@@ -5,7 +5,7 @@ import CameraImage from '../components/CameraImage';
 import ClipIcon from '../icons/Clip';
 import MotionIcon from '../icons/Motion';
 import SnapshotIcon from '../icons/Snapshot';
-import { useDetectState, useClipsState, useSnapshotsState } from '../api/mqtt';
+import { useDetectState, useRecordingsState, useSnapshotsState } from '../api/mqtt';
 import { useConfig, FetchStatus } from '../api';
 import { useMemo } from 'preact/hooks';
@@ -25,7 +25,7 @@ export default function Cameras() {
 function Camera({ name, conf }) {
   const { payload: detectValue, send: sendDetect } = useDetectState(name);
-  const { payload: clipValue, send: sendClips } = useClipsState(name);
+  const { payload: recordValue, send: sendRecordings } = useRecordingsState(name);
   const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
   const href = `/cameras/${name}`;
   const buttons = useMemo(() => {
@@ -46,11 +46,11 @@ function Camera({ name, conf }) {
         },
       },
       {
-        name: `Toggle clips ${clipValue === 'ON' ? 'off' : 'on'}`,
+        name: `Toggle recordings ${recordValue === 'ON' ? 'off' : 'on'}`,
         icon: ClipIcon,
-        color: clipValue === 'ON' ? 'blue' : 'gray',
+        color: recordValue === 'ON' ? 'blue' : 'gray',
         onClick: () => {
-          sendClips(clipValue === 'ON' ? 'OFF' : 'ON');
+          sendRecordings(recordValue === 'ON' ? 'OFF' : 'ON');
         },
       },
       {
@@ -62,7 +62,7 @@ function Camera({ name, conf }) {
         },
       },
     ],
-    [detectValue, sendDetect, clipValue, sendClips, snapshotValue, sendSnapshots]
+    [detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
   );
   return (

View File

@@ -115,8 +115,8 @@ export default function Event({ eventId }) {
         options={{
           sources: [
             {
-              src: `${apiHost}/clips/${data.camera}-${eventId}.mp4`,
-              type: 'video/mp4',
+              src: `${apiHost}/vod/event/${eventId}/index.m3u8`,
+              type: 'application/vnd.apple.mpegurl',
             },
           ],
           poster: data.has_snapshot
@@ -127,10 +127,20 @@ export default function Event({ eventId }) {
         onReady={(player) => {}}
       />
       <div className="text-center">
-        <Button className="mx-2" color="blue" href={`${apiHost}/clips/${data.camera}-${eventId}.mp4`} download>
+        <Button
+          className="mx-2"
+          color="blue"
+          href={`${apiHost}/api/events/${eventId}/clip.mp4?download=true`}
+          download
+        >
           <Clip className="w-6" /> Download Clip
         </Button>
-        <Button className="mx-2" color="blue" href={`${apiHost}/clips/${data.camera}-${eventId}.jpg`} download>
+        <Button
+          className="mx-2"
+          color="blue"
+          href={`${apiHost}/api/events/${eventId}/snapshot.jpg?download=true`}
+          download
+        >
           <Snapshot className="w-6" /> Download Snapshot
         </Button>
       </div>
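
The new endpoints referenced above are plain HTTP GETs, so event media can be fetched outside the UI as well. A sketch with requests, assuming a Frigate instance at http://frigate.local:5000 and an existing event id (both are example values):

    import requests

    api_host = "http://frigate.local:5000"  # example host
    event_id = "1629800000.0-abc123"        # example event id

    # same URLs the Download buttons use; download=true sets a Content-Disposition header
    clip = requests.get(f"{api_host}/api/events/{event_id}/clip.mp4", params={"download": "true"})
    with open("clip.mp4", "wb") as f:
        f.write(clip.content)

    snap = requests.get(f"{api_host}/api/events/{event_id}/snapshot.jpg", params={"download": "true"})
    with open("snapshot.jpg", "wb") as f:
        f.write(snap.content)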

View File

@@ -51,13 +51,13 @@ describe('Cameras Route', () => {
   test('buttons toggle detect, clips, and snapshots', async () => {
     const sendDetect = jest.fn();
-    const sendClips = jest.fn();
+    const sendRecordings = jest.fn();
     const sendSnapshots = jest.fn();
     jest.spyOn(Mqtt, 'useDetectState').mockImplementation(() => {
       return { payload: 'ON', send: sendDetect };
     });
-    jest.spyOn(Mqtt, 'useClipsState').mockImplementation(() => {
-      return { payload: 'OFF', send: sendClips };
+    jest.spyOn(Mqtt, 'useRecordingsState').mockImplementation(() => {
+      return { payload: 'OFF', send: sendRecordings };
     });
     jest.spyOn(Mqtt, 'useSnapshotsState').mockImplementation(() => {
       return { payload: 'ON', send: sendSnapshots };
@@ -72,11 +72,11 @@ describe('Cameras Route', () => {
     fireEvent.click(screen.getAllByLabelText('Toggle snapshots off')[0]);
     expect(sendSnapshots).toHaveBeenCalledWith('OFF');
-    fireEvent.click(screen.getAllByLabelText('Toggle clips on')[0]);
-    expect(sendClips).toHaveBeenCalledWith('ON');
+    fireEvent.click(screen.getAllByLabelText('Toggle recordings on')[0]);
+    expect(sendRecordings).toHaveBeenCalledWith('ON');
     expect(sendDetect).toHaveBeenCalledTimes(1);
     expect(sendSnapshots).toHaveBeenCalledTimes(1);
-    expect(sendClips).toHaveBeenCalledTimes(1);
+    expect(sendRecordings).toHaveBeenCalledTimes(1);
   });
 });