commit 82f4d7f52e
Josh Hawkins 2024-01-31 10:56:12 -06:00
377 changed files with 34519 additions and 12951 deletions


@ -10,7 +10,7 @@
"features": {
"ghcr.io/devcontainers/features/common-utils:1": {}
},
"forwardPorts": [5000, 5001, 5173, 1935, 8554, 8555],
"forwardPorts": [5000, 5001, 5173, 8554, 8555],
"portsAttributes": {
"5000": {
"label": "NGINX",
@ -24,10 +24,6 @@
"label": "Vite Server",
"onAutoForward": "silent"
},
"1935": {
"label": "RTMP",
"onAutoForward": "silent"
},
"8554": {
"label": "gortc RTSP",
"onAutoForward": "silent"


@ -40,9 +40,9 @@ jobs:
node-version: 16.x
- run: npm install
working-directory: ./web
- name: Lint
run: npm run lint
working-directory: ./web
# - name: Lint
# run: npm run lint
# working-directory: ./web
web_test:
name: Web - Test
@ -51,12 +51,12 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-node@master
with:
node-version: 16.x
node-version: 20.x
- run: npm install
working-directory: ./web
- name: Test
run: npm run test
working-directory: ./web
# - name: Test
# run: npm run test
# working-directory: ./web
python_checks:
runs-on: ubuntu-latest

.gitignore

@ -8,7 +8,6 @@ config/*
!config/*.example
models
*.mp4
*.ts
*.db
*.csv
frigate/version.py


@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.13.0
VERSION = 0.14.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
CURRENT_UID := $(shell id -u)


@ -191,7 +191,6 @@ COPY --from=deps-rootfs / /
RUN ldconfig
EXPOSE 5000
EXPOSE 1935
EXPOSE 8554
EXPOSE 8555/tcp 8555/udp
@ -237,7 +236,7 @@ CMD ["sleep", "infinity"]
# Frigate web build
# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
FROM --platform=$BUILDPLATFORM node:16 AS web-build
FROM --platform=$BUILDPLATFORM node:20 AS web-build
WORKDIR /work
COPY web/package.json web/package-lock.json ./


@ -5,7 +5,6 @@ set -euxo pipefail
NGINX_VERSION="1.25.3"
VOD_MODULE_VERSION="1.31"
SECURE_TOKEN_MODULE_VERSION="1.5"
RTMP_MODULE_VERSION="1.2.2"
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
@ -49,10 +48,6 @@ mkdir /tmp/nginx-secure-token-module
wget https://github.com/kaltura/nginx-secure-token-module/archive/refs/tags/${SECURE_TOKEN_MODULE_VERSION}.tar.gz
tar -zxf ${SECURE_TOKEN_MODULE_VERSION}.tar.gz -C /tmp/nginx-secure-token-module --strip-components=1
rm ${SECURE_TOKEN_MODULE_VERSION}.tar.gz
mkdir /tmp/nginx-rtmp-module
wget -nv https://github.com/arut/nginx-rtmp-module/archive/refs/tags/v${RTMP_MODULE_VERSION}.tar.gz
tar -zxf v${RTMP_MODULE_VERSION}.tar.gz -C /tmp/nginx-rtmp-module --strip-components=1
rm v${RTMP_MODULE_VERSION}.tar.gz
cd /tmp/nginx
@ -63,7 +58,6 @@ cd /tmp/nginx
--with-threads \
--add-module=../nginx-vod-module \
--add-module=../nginx-secure-token-module \
--add-module=../nginx-rtmp-module \
--with-cc-opt="-O3 -Wno-error=implicit-fallthrough"
make CC="ccache gcc" -j$(nproc) && make install


@ -7,6 +7,7 @@ numpy == 1.23.*
onvif_zeep == 0.2.12
opencv-python-headless == 4.7.0.*
paho-mqtt == 1.6.*
pandas == 2.1.4
peewee == 3.17.*
peewee_migrate == 1.12.*
psutil == 5.9.*


@ -275,18 +275,3 @@ http {
}
}
}
rtmp {
server {
listen 1935;
chunk_size 4096;
allow publish 127.0.0.1;
deny publish all;
allow play all;
application live {
live on;
record off;
meta copy;
}
}
}


@ -23,8 +23,8 @@ else
fi
tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
wget -q https://github.com/madsciencetist/jetson-ffmpeg/archive/refs/heads/master.zip
unzip master.zip && rm master.zip && cd jetson-ffmpeg-master
wget -q https://github.com/AndBobsYourUncle/jetson-ffmpeg/archive/9c17b09.zip -O jetson-ffmpeg.zip
unzip jetson-ffmpeg.zip && rm jetson-ffmpeg.zip && mv jetson-ffmpeg-* jetson-ffmpeg && cd jetson-ffmpeg
LD_LIBRARY_PATH=$(pwd)/stubs:$LD_LIBRARY_PATH # tegra multimedia libs aren't available in image, so use stubs for ffmpeg build
mkdir build
cd build
@ -42,7 +42,7 @@ cd ../ && rm -rf nv-codec-headers-master
# Build ffmpeg with nvmpi patch
wget -q https://ffmpeg.org/releases/ffmpeg-6.0.tar.xz
tar xaf ffmpeg-*.tar.xz && rm ffmpeg-*.tar.xz && cd ffmpeg-*
patch -p1 < ../jetson-ffmpeg-master/ffmpeg_patches/ffmpeg6.0_nvmpi.patch
patch -p1 < ../jetson-ffmpeg/ffmpeg_patches/ffmpeg6.0_nvmpi.patch
export PKG_CONFIG_PATH=$INSTALL_PREFIX/lib/pkgconfig
# enable Jetson codecs but disable dGPU codecs
./configure --cc='ccache gcc' --cxx='ccache g++' \


@ -69,16 +69,12 @@ cameras:
ffmpeg:
output_args:
record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
rtmp: -c:v copy -c:a aac -f flv
inputs:
- path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
roles:
- detect
- record
- rtmp
rtmp:
enabled: False # <-- RTMP should be disabled if your stream is not H264
detect:
width: # <- optional, by default Frigate tries to automatically detect resolution
height: # <- optional, by default Frigate tries to automatically detect resolution
@ -105,6 +101,15 @@ If available, recommended settings are:
According to [this discussion](https://github.com/blakeblackshear/frigate/issues/3235#issuecomment-1135876973), the http video streams seem to be the most reliable for Reolink.
Cameras connected via a Reolink NVR can be connected with the http stream; use `channel[0..15]` in the stream url for the additional channels.
The main stream can also be set up via RTSP, but this isn't always reliable on all hardware versions. The example configuration works with the oldest HW version RLN16-410 device with multiple types of cameras.
:::caution
The below configuration only works for Reolink cameras with a stream resolution of 5MP or lower; 8MP+ cameras need to use RTSP, as http-flv is not supported in this case.
:::
```yaml
go2rtc:
streams:
@ -112,6 +117,11 @@ go2rtc:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
your_reolink_camera_sub:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
your_reolink_camera_via_nvr:
- "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15
- "ffmpeg:your_reolink_camera_via_nvr#audio=aac"
your_reolink_camera_via_nvr_sub:
- "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_ext.bcs&user=username&password=password"
cameras:
your_reolink_camera:
@ -125,6 +135,17 @@ cameras:
input_args: preset-rtsp-restream
roles:
- detect
reolink_via_nvr:
ffmpeg:
inputs:
- path: rtsp://127.0.0.1:8554/your_reolink_camera_via_nvr?video=copy&audio=aac
input_args: preset-rtsp-restream
roles:
- record
- path: rtsp://127.0.0.1:8554/your_reolink_camera_via_nvr_sub?video=copy
input_args: preset-rtsp-restream
roles:
- detect
```
#### Reolink Doorbell
@ -156,13 +177,12 @@ go2rtc:
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-rtsp)
In the Unifi 2.0 update, Unifi Protect cameras changed the audio sample rate, which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with Unifi Protect.
In the Unifi 2.0 update, Unifi Protect cameras changed the audio sample rate, which causes issues for ffmpeg. The input rate needs to be set for record if used directly with Unifi Protect.
```yaml
ffmpeg:
output_args:
record: preset-record-ubiquiti
rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead
```
### TP-Link VIGI Cameras


@ -16,7 +16,6 @@ Each role can only be assigned to one input per camera. The options for roles ar
| `detect` | Main feed for object detection. [docs](object_detectors.md) |
| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) |
| `audio` | Feed for audio based detection. [docs](audio_detectors.md) |
| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) |
```yaml
mqtt:
@ -29,7 +28,6 @@ cameras:
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
roles:
- detect
- rtmp # <- deprecated, recommend using restream instead
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/live
roles:
- record


@ -18,9 +18,7 @@ See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on
| preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen |
| preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead |
| preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead |
| preset-nvidia-h264 | Nvidia GPU with h264 stream | |
| preset-nvidia-h265 | Nvidia GPU with h265 stream | |
| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 |
| preset-nvidia | Nvidia GPU | |
| preset-jetson-h264 | Nvidia Jetson with h264 stream | |
| preset-jetson-h265 | Nvidia Jetson with h265 stream | |
| preset-rk-h264 | Rockchip MPP with h264 stream | Use image with *-rk suffix and privileged mode |
@ -76,8 +74,8 @@ Output args presets help make the config more readable and handle use cases for
| Preset | Usage | Other Notes |
| -------------------------------- | --------------------------------- | --------------------------------------------- |
| preset-record-generic | Record WITHOUT audio | This is the default when nothing is specified |
| preset-record-generic-audio-aac | Record WITH aac audio | Use this to enable audio in recordings |
| preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings |
| preset-record-generic-audio-aac | Record WITH transcoded aac audio | Use this to transcode to aac audio. If your source is already aac, use preset-record-generic-audio-copy instead to avoid re-encoding |
| preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead |
| preset-record-jpeg | Record live jpeg | Recommend restreaming live jpeg instead |
| preset-record-ubiquiti | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio |


@ -5,7 +5,9 @@ title: Hardware Acceleration
# Hardware Acceleration
It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
It is highly recommended to use a GPU for hardware acceleration in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
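For example, on supported Intel or AMD systems, VAAPI decoding can be enabled with a single preset (a minimal sketch; see the ffmpeg presets page for the preset matching your hardware):

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi
```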
# Officially Supported


@ -1,33 +1,35 @@
---
id: index
title: Frigate Configuration Reference
title: Frigate Configuration
---
For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored.
For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.
It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md):
It is recommended to start with a minimal configuration, add to it as described in [this guide](../guides/getting_started.md), and use the built-in configuration editor in Frigate's UI, which supports validation.
```yaml
mqtt:
host: mqtt.server.com
enabled: False
cameras:
back:
dummy_camera: # <--- this will be changed to your actual camera later
enabled: False
ffmpeg:
inputs:
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
- path: rtsp://127.0.0.1:554/rtsp
roles:
- detect
```
### VSCode Configuration Schema
## VSCode Configuration Schema
VSCode (and the VSCode addon) supports JSON schemas, which will automatically validate the config. Enable this by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file, with `frigate_host` being the IP address of Frigate, or `ccab4aaf-frigate` if running in the addon.
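For example, the top of a config file using the schema might look like this (a sketch; replace `frigate_host` with the address of your Frigate instance):

```yaml
# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json
mqtt:
  enabled: False
```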
### Environment Variable Substitution
## Environment Variable Substitution
Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the configuration reference below. For example, the following values can be replaced at runtime by using environment variables:
Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the [reference config](./reference.md). For example, the following values can be replaced at runtime by using environment variables:
```yaml
mqtt:
@ -47,631 +49,188 @@ onvif:
password: "{FRIGATE_RTSP_PASSWORD}"
```
### Full configuration reference:
```yaml
go2rtc:
rtsp:
username: "{FRIGATE_GO2RTC_RTSP_USERNAME}"
password: "{FRIGATE_GO2RTC_RTSP_PASSWORD}"
```
:::caution
It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions.
:::

## Common configuration examples

Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values.
### Raspberry Pi Home Assistant Addon with USB Coral
- Single camera with 720p, 5fps stream for detect
- MQTT connected to home assistant mosquitto addon
- Hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it was during any event for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
```yaml
mqtt:
# Optional: Enable mqtt server (default: shown below)
enabled: True
# Required: host name
host: mqtt.server.com
# Optional: port (default: shown below)
port: 1883
# Optional: topic prefix (default: shown below)
# NOTE: must be unique if you are running multiple instances
topic_prefix: frigate
# Optional: client id (default: shown below)
# NOTE: must be unique if you are running multiple instances
client_id: frigate
# Optional: user
# NOTE: MQTT user can be specified with an environment variable or docker secret that must begin with 'FRIGATE_'.
# e.g. user: '{FRIGATE_MQTT_USER}'
user: mqtt_user
# Optional: password
# NOTE: MQTT password can be specified with an environment variable or docker secret that must begin with 'FRIGATE_'.
# e.g. password: '{FRIGATE_MQTT_PASSWORD}'
password: password
# Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None)
tls_ca_certs: /path/to/ca.crt
# Optional: tls_client_cert and tls_client key in order to use self-signed client
# certificates (default: None)
# NOTE: certificate must not be password-protected
# do not set user and password when using a client certificate
tls_client_cert: /path/to/client.crt
tls_client_key: /path/to/client.key
# Optional: tls_insecure (true/false) for enabling TLS verification of
# the server hostname in the server certificate (default: None)
tls_insecure: false
# Optional: interval in seconds for publishing stats (default: shown below)
stats_interval: 60
host: core-mosquitto
user: mqtt-user
password: xxxxxxxxxx
# Optional: Detectors configuration. Defaults to a single CPU detector
detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
type: cpu
# Optional: Database configuration
database:
# The path to store the SQLite DB (default: shown below)
path: /config/frigate.db
# Optional: model modifications
model:
# Optional: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Optional: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Optional: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Optional: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Audio Events Configuration
# NOTE: Can be overridden at the camera level
audio:
# Optional: Enable audio events (default: shown below)
enabled: False
# Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
max_not_heard: 30
# Optional: Configure the min rms volume required to run audio detection (default: shown below)
# As a rule of thumb:
# - 200 - high sensitivity
# - 500 - medium sensitivity
# - 1000 - low sensitivity
min_volume: 500
# Optional: Types of audio to listen for (default: shown below)
listen:
- bark
- fire_alarm
- scream
- speech
- yell
# Optional: Filters to configure detection.
filters:
# Label that matches label in listen config.
speech:
# Minimum score that triggers an audio event (default: shown below)
threshold: 0.8
# Optional: logger verbosity settings
logger:
# Optional: Default log verbosity (default: shown below)
default: info
# Optional: Component specific logger overrides
logs:
frigate.event: debug
# Optional: set environment variables
environment_vars:
EXAMPLE_VAR: value
# Optional: birdseye configuration
# NOTE: Can (enabled, mode) be overridden at the camera level
birdseye:
# Optional: Enable birdseye view (default: shown below)
enabled: True
# Optional: Restream birdseye via RTSP (default: shown below)
# NOTE: Enabling this will set birdseye to run 24/7 which may increase CPU usage somewhat.
restream: False
# Optional: Width of the output resolution (default: shown below)
width: 1280
# Optional: Height of the output resolution (default: shown below)
height: 720
# Optional: Encoding quality of the mpeg1 feed (default: shown below)
# 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
quality: 8
# Optional: Mode of the view. Available options are: objects, motion, and continuous
# objects - cameras are included if they have had a tracked object within the last 30 seconds
# motion - cameras are included if motion was detected in the last 30 seconds
# continuous - all cameras are included always
mode: objects
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
# Optional: global ffmpeg args (default: shown below)
global_args: -hide_banner -loglevel warning -threads 2
# Optional: global hwaccel args (default: shown below)
# NOTE: See hardware acceleration docs for your specific device
hwaccel_args: []
# Optional: global input args (default: shown below)
input_args: preset-rtsp-generic
# Optional: global output args
output_args:
# Optional: output args for detect streams (default: shown below)
detect: -threads 2 -f rawvideo -pix_fmt yuv420p
# Optional: output args for record streams (default: shown below)
record: preset-record-generic
# Optional: output args for rtmp streams (default: shown below)
rtmp: preset-rtmp-generic
# Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
# If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once
# If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
# NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
retry_interval: 10
hwaccel_args: preset-rpi-64-h264
# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
detect:
# Optional: width of the frame for the input with the detect role (default: use native stream resolution)
width: 1280
# Optional: height of the frame for the input with the detect role (default: use native stream resolution)
height: 720
# Optional: desired fps for your camera for the input with the detect role (default: shown below)
# NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
fps: 5
# Optional: enables detection for the camera (default: True)
enabled: True
# Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate)
min_initialized: 2
# Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate)
max_disappeared: 25
# Optional: Configuration for stationary object tracking
stationary:
# Optional: Frequency for confirming stationary objects (default: same as threshold)
# When set to 1, object detection will run to confirm the object still exists on every frame.
# If set to 10, object detection will run to confirm the object still exists on every 10th frame.
interval: 50
# Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s)
threshold: 50
# Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever)
# This can help with false positives for objects that should only be stationary for a limited amount of time.
# It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave
# car at the default.
# WARNING: Setting these values overrides default behavior and disables stationary object tracking.
# There are very few situations where you would want it disabled. It is NOT recommended to
# copy these values from the example config into your config unless you know they are needed.
max_frames:
# Optional: Default for all object types (default: not set, track forever)
default: 3000
# Optional: Object specific values
objects:
person: 1000
# Optional: Milliseconds to offset detect annotations by (default: shown below).
# There can often be latency between a recording and the detect process,
# especially when using separate streams for detect and record.
# Use this setting to make the timeline bounding boxes more closely align
# with the recording. The value can be positive or negative.
# TIP: Imagine there is an event clip with a person walking from left to right.
# If the event timeline bounding box is consistently to the left of the person
# then the value should be decreased. Similarly, if a person is walking from
# left to right and the bounding box is consistently ahead of the person
# then the value should be increased.
# TIP: This offset is dynamic so you can change the value and it will update existing
# events, this makes it easy to tune.
# WARNING: Fast moving objects will likely not have the bounding box align.
annotation_offset: 0
detectors:
coral:
type: edgetpu
device: usb
# Optional: Object configuration
# NOTE: Can be overridden at the camera level
objects:
# Optional: list of objects to track from labelmap.txt (default: shown below)
track:
- person
# Optional: mask to prevent all object types from being detected in certain areas (default: no mask)
# Checks based on the bottom center of the bounding box of the object.
# NOTE: This mask is COMBINED with the object type specific mask below
mask: 0,0,1000,0,1000,200,0,200
# Optional: filters to reduce false positives for specific object types
filters:
person:
# Optional: minimum width*height of the bounding box for the detected object (default: 0)
min_area: 5000
# Optional: maximum width*height of the bounding box for the detected object (default: 24000000)
max_area: 100000
# Optional: minimum width/height of the bounding box for the detected object (default: 0)
min_ratio: 0.5
# Optional: maximum width/height of the bounding box for the detected object (default: 24000000)
max_ratio: 2.0
# Optional: minimum score for the object to initiate tracking (default: shown below)
min_score: 0.5
# Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below)
threshold: 0.7
# Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
# Checks based on the bottom center of the bounding box of the object
mask: 0,0,1000,0,1000,200,0,200
# Optional: Motion configuration
# NOTE: Can be overridden at the camera level
motion:
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255.
threshold: 30
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects.
# As a rule of thumb:
# - 10 - high sensitivity
# - 30 - medium sensitivity
# - 50 - low sensitivity
contour_area: 10
# Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
# Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
# Low values will cause things like moving shadows to be detected as motion for longer.
# https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
frame_alpha: 0.01
# Optional: Height of the resized motion frame (default: 100)
# Higher values will result in more granular motion detection at the expense of higher CPU usage.
# Lower values result in less CPU, but small changes may not register as motion.
frame_height: 100
# Optional: motion mask
# NOTE: see docs for more detailed info on creating masks
mask: 0,900,1080,900,1080,1920,0,1920
# Optional: improve contrast (default: shown below)
# Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive
# for daytime.
improve_contrast: True
# Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below).
mqtt_off_delay: 30
# Optional: Record configuration
# NOTE: Can be overridden at the camera level
record:
# Optional: Enable recording (default: shown below)
# WARNING: If recording is disabled in the config, turning it on via
# the UI or MQTT later will have no effect.
enabled: False
# Optional: Number of minutes to wait between cleanup runs (default: shown below)
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
expire_interval: 60
# Optional: Sync recordings with disk on startup and once a day (default: shown below).
sync_recordings: False
# Optional: Retention settings for recording
enabled: True
retain:
# Optional: Number of days to retain recordings regardless of events (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in events section below
# if you only want to retain recordings of events.
days: 0
# Optional: Mode for retention. Available options are: all, motion, and active_objects
# all - save all recording segments regardless of activity
# motion - save all recordings segments with any detected motion
# active_objects - save all recording segments with active/moving objects
# NOTE: this mode only applies when the days setting above is greater than 0
mode: all
# Optional: Recording Export Settings
export:
# Optional: Timelapse Output Args (default: shown below).
# NOTE: The default args are set to fit 24 hours of recording into 1 hour playback.
# See https://stackoverflow.com/a/58268695 for more info on how these args work.
# As an example: if you wanted to go from 24 hours to 30 minutes that would be going
# from 86400 seconds to 1800 seconds which would be 1800 / 86400 = 0.02.
# The -r (framerate) dictates how smooth the output video is.
# So the args would be -vf setpts=0.02*PTS -r 30 in that case.
timelapse_args: "-vf setpts=0.04*PTS -r 30"
# Optional: Event recording settings
days: 7
mode: motion
events:
# Optional: Number of seconds before the event to include (default: shown below)
pre_capture: 5
# Optional: Number of seconds after the event to include (default: shown below)
post_capture: 5
# Optional: Objects to save recordings for. (default: all tracked objects)
objects:
- person
# Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones)
required_zones: []
# Optional: Retention settings for recordings of events
retain:
# Required: Default retention days (default: shown below)
default: 10
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for events regardless of activity
# motion - save all recordings segments for events with any detected motion
# active_objects - save all recording segments for event with active/moving objects
#
# NOTE: If the retain mode for the camera is more restrictive than the mode configured
# here, the segments will already be gone by the time this mode is applied.
# For example, if the camera retain mode is "motion", the segments without motion are
# never stored, so setting the mode to "all" here won't bring them back.
default: 30
mode: motion
# Optional: Per object retention days
objects:
person: 15
# Optional: Configuration for the jpg snapshots written to the clips directory for each event
# NOTE: Can be overridden at the camera level
snapshots:
# Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
enabled: False
# Optional: save a clean PNG copy of the snapshot image (default: shown below)
clean_copy: True
# Optional: print a timestamp on the snapshots (default: shown below)
timestamp: False
# Optional: draw bounding box on the snapshots (default: shown below)
bounding_box: True
# Optional: crop the snapshot (default: shown below)
crop: False
# Optional: height to resize the snapshot to (default: original size)
height: 175
# Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones)
required_zones: []
# Optional: Camera override for retention settings (default: global values)
enabled: True
retain:
# Required: Default retention days (default: shown below)
default: 10
# Optional: Per object retention days
objects:
person: 15
# Optional: quality of the encoded jpeg, 0-100 (default: shown below)
quality: 70
default: 30
# Optional: RTMP configuration
# NOTE: RTMP is deprecated in favor of restream
# NOTE: Can be overridden at the camera level
rtmp:
# Optional: Enable the RTMP stream (default: False)
cameras:
name_of_your_camera:
detect:
width: 1280
height: 720
fps: 5
ffmpeg:
inputs:
- path: rtsp://10.0.10.10:554/rtsp
roles:
- detect
motion:
mask:
- 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
```
### Standalone Intel Mini PC with USB Coral
- Single camera with 720p, 5fps stream for detect
- MQTT disabled (not integrated with home assistant)
- VAAPI hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it was during any event for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
```yaml
mqtt:
enabled: False
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.8.3)
go2rtc:
ffmpeg:
hwaccel_args: preset-vaapi
# Optional: jsmpeg stream configuration for WebUI
live:
# Optional: Set the name of the stream that should be used for live view
# in frigate WebUI. (default: name of camera)
stream_name: camera_name
# Optional: Set the height of the jsmpeg stream. (default: 720)
# This must be less than or equal to the height of the detect stream. Lower resolutions
# reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio.
height: 720
# Optional: Set the encode quality of the jsmpeg stream (default: shown below)
# 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
quality: 8
detectors:
coral:
type: edgetpu
device: usb
# Optional: in-feed timestamp style configuration
# NOTE: Can be overridden at the camera level
timestamp_style:
# Optional: Position of the timestamp (default: shown below)
# "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right)
position: "tl"
# Optional: Format specifier conform to the Python package "datetime" (default: shown below)
# Additional Examples:
# german: "%d.%m.%Y %H:%M:%S"
format: "%m/%d/%Y %H:%M:%S"
# Optional: Color of font
color:
# All Required when color is specified (default: shown below)
red: 255
green: 255
blue: 255
# Optional: Line thickness of font (default: shown below)
thickness: 2
# Optional: Effect of lettering (default: shown below)
# None (No effect),
# "solid" (solid background in inverse color of font)
# "shadow" (shadow for font)
effect: None
record:
enabled: True
retain:
days: 7
mode: motion
events:
retain:
default: 30
mode: motion
snapshots:
enabled: True
retain:
default: 30
# Required
cameras:
# Required: name of the camera
back:
# Optional: Enable/Disable the camera (default: shown below).
# If disabled: config is used but no live stream and no capture etc.
# Events/Recordings are still viewable.
enabled: True
# Required: ffmpeg settings for the camera
name_of_your_camera:
detect:
width: 1280
height: 720
fps: 5
ffmpeg:
# Required: A list of input streams for the camera. See documentation for more information.
inputs:
# Required: the path to the stream
# NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {}
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
# Required: list of roles for this stream. valid values are: audio,detect,record,rtmp
# NOTICE: In addition to assigning the audio, record, and rtmp roles,
# they must also be enabled in the camera config.
- path: rtsp://10.0.10.10:554/rtsp
roles:
- audio
- detect
- record
- rtmp
# Optional: stream specific global args (default: inherit)
# global_args:
# Optional: stream specific hwaccel args (default: inherit)
# hwaccel_args:
# Optional: stream specific input args (default: inherit)
# input_args:
# Optional: camera specific global args (default: inherit)
# global_args:
# Optional: camera specific hwaccel args (default: inherit)
# hwaccel_args:
# Optional: camera specific input args (default: inherit)
# input_args:
# Optional: camera specific output args (default: inherit)
# output_args:
# Optional: timeout for highest scoring image before allowing it
# to be replaced by a newer image. (default: shown below)
best_image_timeout: 60
# Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera.
webui_url: ""
# Optional: zones for this camera
zones:
# Required: name of the zone
# NOTE: This must be different than any camera names, but can match with another zone on another
# camera.
front_steps:
# Required: List of x,y coordinates to define the polygon of the zone.
# NOTE: Presence in a zone is evaluated only based on the bottom center of the object's bounding box.
coordinates: 545,1077,747,939,788,805
# Optional: Number of consecutive frames required for object to be considered present in the zone (default: shown below).
inertia: 3
# Optional: List of objects that can trigger this zone (default: all tracked objects)
objects:
- person
# Optional: Zone level object filters.
# NOTE: The global and camera filters are applied upstream.
filters:
person:
min_area: 5000
max_area: 100000
threshold: 0.7
# Optional: Configuration for the jpg snapshots published via MQTT
mqtt:
# Optional: Enable publishing snapshot via mqtt for camera (default: shown below)
# NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'.
# All other messages will still be published.
enabled: True
# Optional: print a timestamp on the snapshots (default: shown below)
timestamp: True
# Optional: draw bounding box on the snapshots (default: shown below)
bounding_box: True
# Optional: crop the snapshot (default: shown below)
crop: True
# Optional: height to resize the snapshot to (default: shown below)
height: 270
# Optional: jpeg encode quality (default: shown below)
quality: 70
# Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones)
required_zones: []
# Optional: Configuration for how camera is handled in the GUI.
ui:
# Optional: Adjust sort order of cameras in the UI. Larger numbers come later (default: shown below)
# By default the cameras are sorted alphabetically.
order: 0
# Optional: Whether or not to show the camera in the Frigate UI (default: shown below)
dashboard: True
# Optional: connect to ONVIF camera
# to enable PTZ controls.
onvif:
# Required: host of the camera being connected to.
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
# Optional: username for login.
# NOTE: Some devices require admin to access ONVIF.
user: admin
# Optional: password for login.
password: admin
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:
# Optional: enable/disable object autotracking. (default: shown below)
enabled: False
# Optional: calibrate the camera on startup (default: shown below)
# A calibration will move the PTZ in increments and measure the time it takes to move.
# The results are used to help estimate the position of tracked objects after a camera move.
# Frigate will update your config file automatically after a calibration with
# a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False.
calibrate_on_startup: False
# Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below)
# Available options are: disabled, absolute, and relative
# disabled - don't zoom in/out on autotracked objects, use pan/tilt only
# absolute - use absolute zooming (supported by most PTZ capable cameras)
# relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements)
zooming: disabled
# Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below)
# A lower value will keep more of the scene in view around a tracked object.
# A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly.
# The value should be between 0.1 and 0.75
zoom_factor: 0.3
# Optional: list of objects to track from labelmap.txt (default: shown below)
track:
- person
# Required: Begin automatically tracking an object when it enters any of the listed zones.
required_zones:
- zone_name
# Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below)
return_preset: home
# Optional: Seconds to delay before returning to preset. (default: shown below)
timeout: 10
# Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below)
movement_weights: []
# Optional: Configuration for how to sort the cameras in the Birdseye view.
birdseye:
# Optional: Adjust sort order of cameras in the Birdseye view. Larger numbers come later (default: shown below)
# By default the cameras are sorted alphabetically.
order: 0
# Optional
ui:
# Optional: Set the default live mode for cameras in the UI (default: shown below)
live_mode: mse
# Optional: Set a timezone to use in the UI (default: use browser local time)
# timezone: America/Denver
# Optional: Use an experimental recordings / camera view UI (default: shown below)
use_experimental: False
# Optional: Set the time format used.
# Options are browser, 12hour, or 24hour (default: shown below)
time_format: browser
# Optional: Set the date style for a specified length.
# Options are: full, long, medium, short
# Examples:
# short: 2/11/23
# medium: Feb 11, 2023
# full: Saturday, February 11, 2023
# (default: shown below).
date_style: short
# Optional: Set the time style for a specified length.
# Options are: full, long, medium, short
# Examples:
# short: 8:14 PM
# medium: 8:15:22 PM
# full: 8:15:22 PM Mountain Standard Time
# (default: shown below).
time_style: medium
# Optional: Ability to manually override the date / time styling to use strftime format
# https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html
# possible values are shown above (default: not set)
strftime_fmt: "%Y/%m/%d %H:%M"
# Optional: Telemetry configuration
telemetry:
# Optional: Enabled network interfaces for bandwidth stats monitoring (default: empty list, let nethogs search all)
network_interfaces:
- eth
- enp
- eno
- ens
- wl
- lo
# Optional: Configure system stats
stats:
# Enable AMD GPU stats (default: shown below)
amd_gpu_stats: True
# Enable Intel GPU stats (default: shown below)
intel_gpu_stats: True
# Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False
# Optional: Enable the latest version outbound check (default: shown below)
# NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions
version_check: True
motion:
mask:
- 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
```
### Home Assistant integrated Intel Mini PC with OpenVino
- Single camera with 720p, 5fps stream for detect
- MQTT connected to same mqtt server as home assistant
- VAAPI hardware acceleration for decoding video
- OpenVino detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it was during any event for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
```yaml
mqtt:
host: 192.168.X.X # <---- same mqtt broker that home assistant uses
user: mqtt-user
password: xxxxxxxxxx
ffmpeg:
hwaccel_args: preset-vaapi
detectors:
ov:
type: openvino
device: AUTO
model:
path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:
enabled: True
retain:
days: 7
mode: motion
events:
retain:
default: 30
mode: motion
snapshots:
enabled: True
retain:
default: 30
cameras:
name_of_your_camera:
detect:
width: 1280
height: 720
fps: 5
ffmpeg:
inputs:
- path: rtsp://10.0.10.10:554/rtsp
roles:
- detect
motion:
mask:
- 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
```


@ -3,11 +3,19 @@ id: masks
title: Masks
---
There are two types of masks available:
## Motion masks
**Motion masks**: Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the debug feed with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again.
Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the debug feed with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again.
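For example, a motion mask over a timestamp overlay might look like this (a minimal sketch; the camera name and polygon coordinates are illustrative and should be drawn for your own camera):

```yaml
cameras:
  back:
    motion:
      mask:
        - 0,900,1080,900,1080,1920,0,1920
```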
**Object filter masks**: Object filter masks are used to filter out false positives for a given object type based on location. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell Frigate that anything in your yard is a false positive.
## Object filter masks
Object filter masks are used to filter out false positives for a given object type based on location. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell Frigate that anything in your yard is a false positive.
Object filter masks can be used to filter out stubborn false positives in fixed locations. For example, the base of this tree may be frequently detected as a person. The following image shows an example of an object filter mask (shaded red area) over the location where the bottom center is typically located to filter out person detections in a precise location.
![object mask](/img/bottom-center-mask.jpg)
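A corresponding config might look like this (a minimal sketch; the camera name and coordinates are illustrative):

```yaml
cameras:
  back:
    objects:
      filters:
        person:
          mask: 0,0,1000,0,1000,200,0,200
```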
## Using the mask creator
To create a poly mask:


@ -176,7 +176,7 @@ volumes:
## NVidia TensorRT Detector
NVidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix. This detector is designed to work with Yolo models for object detection.
NVidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection.
### Minimum Hardware Support
@ -198,7 +198,7 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben
The model used for TensorRT must be preprocessed on the same hardware platform that it will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
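For instance, in a Docker Compose file this could be set as follows (a minimal sketch; `yolov7-320` is the default model mentioned above):

```yaml
frigate:
  environment:
    - YOLO_MODELS=yolov7-320
```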
@ -245,7 +245,7 @@ frigate:
- USE_FP16=false
```
If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU, however in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU.
```yml
frigate:
@ -295,12 +295,12 @@ Replace `<your_codeproject_ai_server_ip>` and `<port>` with the IP address and p
To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly.
# Community Supported Detectors
## Rockchip RKNN-Toolkit-Lite2
This detector is only available if one of the following Rockchip SoCs is used:
- RK3588/RK3588S
- RK3568
- RK3566
@ -317,13 +317,13 @@ Use a frigate docker image with `-rk` suffix and enable privileged mode by addin
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for one). Lines that are at minimum required to use the detector are labeled as required; all other lines are optional.
```yaml
detectors: # required
rknn: # required
type: rknn # required
# core mask for npu
core_mask: 0
model: # required
# name of yolov8 model or path to your own .rknn model file
# possible values are:
# - default-yolov8n
@ -338,12 +338,13 @@ model: # required
height: 320
# pixel format of detection frame
# default value is rgb but yolov models usually use bgr format
input_pixel_format: bgr # required
# shape of detection frame
input_tensor: nhwc
```
Explanation for rknn specific options:
- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing, only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, where the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference time, especially when using bigger models (see section below). Examples:
- `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value.
- `core_mask: 0b001` use only core0.
@ -378,6 +379,7 @@ $ cat /sys/kernel/debug/rknpu/load
- If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`.
- If your server has no internet connection, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases) using another device and place it in the `config/model_cache/rknn` on your system.
- Finally, you can also provide your own model. Note that only yolov8 models are currently supported. Moreover, you will need to convert your model to the rknn format using `rknn-toolkit2` on an x86 machine. Afterwards, you can place your `.rknn` model file in the `config/model_cache/rknn` directory on your system. Then you need to pass the path to your model using the `path` option of your `model` block like this:
```yaml
model:
path: /config/model_cache/rknn/my-rknn-model.rknn


@ -1,8 +1,10 @@
---
id: false_positives
title: Reducing false positives
id: object_filters
title: Filters
---
There are several types of object filters that can be used to reduce false positive rates.
## Object Scores
For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85:
@ -18,6 +20,8 @@ For object filters in your configuration, any single detection below `min_score`
In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example.
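These two values are set per object type under `filters`, for example (a minimal sketch matching the values above):

```yaml
objects:
  filters:
    person:
      min_score: 0.6
      threshold: 0.85
```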
### Minimum Score
Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low, false positives may be detected and tracked, which can confuse the object tracker and waste resources. If `min_score` is too high, lower-scoring true positives, such as objects that are further away or partially occluded, may be thrown out, which can also confuse the tracker and cause valid events to be lost or disjointed.
@ -36,7 +40,13 @@ False positives can also be reduced by filtering a detection based on its shape.
### Object Proportions
`min_ratio` and `max_ratio` filter on the ratio of width / height of an object's bounding box and can be used to reduce false positives. For example, if a false positive is detected as very tall for a dog, which is often wider, a `min_ratio` filter can be used to filter out these false positives.
`min_ratio` and `max_ratio` values are compared against a given detected object's width/height ratio (in pixels). If the ratio is outside this range, the object will be ignored as a false positive. This allows objects that are proportionally too short-and-wide (higher ratio) or too tall-and-narrow (smaller ratio) to be ignored.
:::info
Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "wide flat" box. If `min_ratio` is 1.0, any object that is taller than it is wide will be ignored. Similarly, if `max_ratio` is 1.0, then any object that is wider than it is tall will be ignored.
:::
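As a rough sketch, a filter that ignores unusually tall detections for `dog` might look like the following; the exact value is illustrative, not a recommendation:

```yaml
objects:
  filters:
    dog:
      # Ignore boxes that are proportionally too tall-and-narrow for a dog
      min_ratio: 0.8
```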
## Other Tools

View File

@ -1,6 +1,6 @@
---
id: objects
title: Objects
title: Available Objects
---
import labels from "../../../labelmap.txt";
@ -8,8 +8,9 @@ import labels from "../../../labelmap.txt";
Frigate includes the object models listed below from the Google Coral test data.
Please note:
- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.
- `person` is the only tracked object by default. See the [full configuration reference](index.md#full-configuration-reference) for an example of expanding the list of tracked objects.
- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.
- `person` is the only tracked object by default. See the [full configuration reference](index.md#full-configuration-reference) for an example of expanding the list of tracked objects.
<ul>
{labels.split("\n").map((label) => (

View File

@ -3,17 +3,90 @@ id: record
title: Recording
---
Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
New recording segments are written from the camera stream to cache and are only moved to disk if they match the configured recording retention policy.
H265 recordings can be viewed in Chrome 108+, Edge, and Safari only. All other browsers require recordings to be encoded with H264.
## Common recording configurations
### Most conservative: Ensure all video is saved
For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with events will be retained until 30 days have passed.
```yaml
record:
enabled: True
retain:
days: 3
mode: all
events:
retain:
default: 30
mode: motion
```
### Reduced storage: Only saving video when motion is detected
In order to reduce storage requirements, you can adjust your config to only retain video where motion was detected.
```yaml
record:
enabled: True
retain:
days: 3
    mode: motion
events:
retain:
default: 30
mode: motion
```
### Minimum: Events only
If you only want to retain video that occurs during an event, this config will discard video unless an event is ongoing.
```yaml
record:
enabled: True
retain:
days: 0
mode: all
events:
retain:
default: 30
mode: motion
```
## Will Frigate delete old recordings if my storage runs out?
As of Frigate 0.12, if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
## What if I don't want 24/7 recordings?
## Configuring Recording Retention
Frigate supports both continuous and event based recordings with separate retention modes and retention periods.
:::tip
Retention configs support decimals meaning they can be configured to retain `0.5` days, for example.
:::
### Continuous Recording
The number of days to retain continuous recordings can be set via the following config. By default, continuous recording is disabled.
```yaml
record:
enabled: True
retain:
days: 1 # <- number of days to keep continuous recordings
```
Continuous recording supports different retention modes, [which are described below](#what-do-the-different-retain-modes-mean).
### Event Recording
If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled.
@ -22,34 +95,31 @@ record:
enabled: True
events:
retain:
default: 10
default: 10 # <- number of days to keep event recordings
```
This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
When `retain -> days` is set to `0`, segments will be deleted from the cache if no events are in progress.
## Can I have "24/7" recordings, but only at certain times?
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.
## What do the different retain modes mean?
Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for 24/7 recording (but can also affect events).
Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect events).
Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording.
Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of 24/7 recording.
- With the `all` option all 48 hours of those two days would be kept and viewable.
- With the `motion` option, the only parts of those 48 hours that would be kept are the segments where Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments.
- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary.
The same options are available with events. Let's consider a scenario where you drive up and park in your driveway, go inside, then come back out 4 hours later.
- With the `all` option all segments for the duration of the event would be saved for the event. This event would have 4 hours of footage.
- With the `motion` option, all segments for the duration of the event with motion would be saved. This means any segment where a car drove by in the street, a person walked by, lighting changed, etc. would be saved.
- With the `active_objects` option, it would only keep segments where the object was active. In this case, the only segments that would be saved would be the ones where the car was driving up, you going inside, you coming outside, and the car driving away, essentially reducing the 4 hours to a minute or two of event footage.
A configuration example of the above retain modes, where all `motion` segments are stored for 7 days and `active_objects` segments are stored for 14 days, would be as follows:
```yaml
record:
enabled: True
@ -61,11 +131,13 @@ record:
default: 14
mode: active_objects
```
The above configuration example can be added globally or on a per camera basis.
### Object Specific Retention
You can also set a specific retention length for an object type. The below configuration example builds on the example above, but also specifies that recordings of dogs only need to be kept for 2 days and recordings of cars should be kept for 7 days.
```yaml
record:
enabled: True
@ -81,9 +153,13 @@ record:
car: 7
```
## Can I have "continuous" recordings, but only at certain times?
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
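As an illustration, a minimal Home Assistant automation sketch that enables recording at night could look like the following. It assumes the Frigate integration is installed and exposes a `switch.back_recordings` entity for a camera named `back`; adjust the entity id to match your setup.

```yaml
automation:
  - alias: "Enable Frigate recording at night"
    trigger:
      # Turn recording on every evening at 9pm
      - platform: time
        at: "21:00:00"
    action:
      - service: switch.turn_on
        target:
          # Hypothetical entity id; check your Frigate integration entities
          entity_id: switch.back_recordings
```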
## How do I export recordings?
The export page in the Frigate WebUI allows for exporting real time clips with a designated start and stop time as well as exporting a timelapse for a designated start and stop time. These exports can take a while so it is important to leave the file until it is no longer in progress.
The export page in the Frigate WebUI allows for exporting real time clips with a designated start and stop time, as well as exporting a time-lapse for a designated start and stop time. These exports can take a while, so it is important to leave the file untouched until it is no longer in progress.
## Syncing Recordings With Disk

View File

@ -0,0 +1,628 @@
---
id: reference
title: Full Reference Config
---
### Full configuration reference:
:::caution
It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions.
:::
```yaml
mqtt:
# Optional: Enable mqtt server (default: shown below)
enabled: True
# Required: host name
host: mqtt.server.com
# Optional: port (default: shown below)
port: 1883
# Optional: topic prefix (default: shown below)
# NOTE: must be unique if you are running multiple instances
topic_prefix: frigate
# Optional: client id (default: shown below)
# NOTE: must be unique if you are running multiple instances
client_id: frigate
# Optional: user
# NOTE: MQTT user can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'.
# e.g. user: '{FRIGATE_MQTT_USER}'
user: mqtt_user
# Optional: password
# NOTE: MQTT password can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'.
# e.g. password: '{FRIGATE_MQTT_PASSWORD}'
password: password
# Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None)
tls_ca_certs: /path/to/ca.crt
# Optional: tls_client_cert and tls_client key in order to use self-signed client
# certificates (default: None)
# NOTE: certificate must not be password-protected
# do not set user and password when using a client certificate
tls_client_cert: /path/to/client.crt
tls_client_key: /path/to/client.key
  # Optional: tls_insecure (true/false) to disable verification of
  # the server hostname in the server certificate (default: None)
tls_insecure: false
# Optional: interval in seconds for publishing stats (default: shown below)
stats_interval: 60
# Optional: Detectors configuration. Defaults to a single CPU detector
detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
type: cpu
# Optional: Database configuration
database:
# The path to store the SQLite DB (default: shown below)
path: /config/frigate.db
# Optional: model modifications
model:
# Optional: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Optional: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Optional: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Optional: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Audio Events Configuration
# NOTE: Can be overridden at the camera level
audio:
# Optional: Enable audio events (default: shown below)
enabled: False
# Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
max_not_heard: 30
# Optional: Configure the min rms volume required to run audio detection (default: shown below)
# As a rule of thumb:
# - 200 - high sensitivity
# - 500 - medium sensitivity
# - 1000 - low sensitivity
min_volume: 500
# Optional: Types of audio to listen for (default: shown below)
listen:
- bark
- fire_alarm
- scream
- speech
- yell
# Optional: Filters to configure detection.
filters:
# Label that matches label in listen config.
speech:
# Minimum score that triggers an audio event (default: shown below)
threshold: 0.8
# Optional: logger verbosity settings
logger:
# Optional: Default log verbosity (default: shown below)
default: info
# Optional: Component specific logger overrides
logs:
frigate.event: debug
# Optional: set environment variables
environment_vars:
EXAMPLE_VAR: value
# Optional: birdseye configuration
# NOTE: enabled and mode can be overridden at the camera level
birdseye:
# Optional: Enable birdseye view (default: shown below)
enabled: True
# Optional: Restream birdseye via RTSP (default: shown below)
# NOTE: Enabling this will set birdseye to run 24/7 which may increase CPU usage somewhat.
restream: False
# Optional: Width of the output resolution (default: shown below)
width: 1280
# Optional: Height of the output resolution (default: shown below)
height: 720
# Optional: Encoding quality of the mpeg1 feed (default: shown below)
# 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
quality: 8
# Optional: Mode of the view. Available options are: objects, motion, and continuous
# objects - cameras are included if they have had a tracked object within the last 30 seconds
# motion - cameras are included if motion was detected in the last 30 seconds
# continuous - all cameras are included always
mode: objects
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
# Optional: global ffmpeg args (default: shown below)
global_args: -hide_banner -loglevel warning -threads 2
# Optional: global hwaccel args (default: auto detect)
# NOTE: See hardware acceleration docs for your specific device
hwaccel_args: "auto"
# Optional: global input args (default: shown below)
input_args: preset-rtsp-generic
# Optional: global output args
output_args:
# Optional: output args for detect streams (default: shown below)
detect: -threads 2 -f rawvideo -pix_fmt yuv420p
# Optional: output args for record streams (default: shown below)
record: preset-record-generic
# Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
  # If set too low, Frigate will retry a connection to the camera's stream too frequently and may use up the limited concurrent streams some cameras allow.
  # If set too high, an ffmpeg crash or camera stream timeout could cost you up to retry_interval seconds of footage.
# NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
retry_interval: 10
# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
detect:
# Optional: width of the frame for the input with the detect role (default: use native stream resolution)
width: 1280
# Optional: height of the frame for the input with the detect role (default: use native stream resolution)
height: 720
# Optional: desired fps for your camera for the input with the detect role (default: shown below)
# NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
fps: 5
# Optional: enables detection for the camera (default: True)
enabled: True
# Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate)
min_initialized: 2
# Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate)
max_disappeared: 25
# Optional: Configuration for stationary object tracking
stationary:
# Optional: Frequency for confirming stationary objects (default: same as threshold)
# When set to 1, object detection will run to confirm the object still exists on every frame.
# If set to 10, object detection will run to confirm the object still exists on every 10th frame.
interval: 50
# Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s)
threshold: 50
# Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever)
# This can help with false positives for objects that should only be stationary for a limited amount of time.
# It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave
# car at the default.
# WARNING: Setting these values overrides default behavior and disables stationary object tracking.
# There are very few situations where you would want it disabled. It is NOT recommended to
# copy these values from the example config into your config unless you know they are needed.
max_frames:
# Optional: Default for all object types (default: not set, track forever)
default: 3000
# Optional: Object specific values
objects:
person: 1000
# Optional: Milliseconds to offset detect annotations by (default: shown below).
# There can often be latency between a recording and the detect process,
# especially when using separate streams for detect and record.
# Use this setting to make the timeline bounding boxes more closely align
# with the recording. The value can be positive or negative.
# TIP: Imagine there is an event clip with a person walking from left to right.
# If the event timeline bounding box is consistently to the left of the person
# then the value should be decreased. Similarly, if a person is walking from
# left to right and the bounding box is consistently ahead of the person
# then the value should be increased.
# TIP: This offset is dynamic so you can change the value and it will update existing
# events, this makes it easy to tune.
# WARNING: Fast moving objects will likely not have the bounding box align.
annotation_offset: 0
# Optional: Object configuration
# NOTE: Can be overridden at the camera level
objects:
# Optional: list of objects to track from labelmap.txt (default: shown below)
track:
- person
# Optional: mask to prevent all object types from being detected in certain areas (default: no mask)
# Checks based on the bottom center of the bounding box of the object.
# NOTE: This mask is COMBINED with the object type specific mask below
mask: 0,0,1000,0,1000,200,0,200
# Optional: filters to reduce false positives for specific object types
filters:
person:
# Optional: minimum width*height of the bounding box for the detected object (default: 0)
min_area: 5000
# Optional: maximum width*height of the bounding box for the detected object (default: 24000000)
max_area: 100000
# Optional: minimum width/height of the bounding box for the detected object (default: 0)
min_ratio: 0.5
# Optional: maximum width/height of the bounding box for the detected object (default: 24000000)
max_ratio: 2.0
# Optional: minimum score for the object to initiate tracking (default: shown below)
min_score: 0.5
# Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below)
threshold: 0.7
# Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
# Checks based on the bottom center of the bounding box of the object
mask: 0,0,1000,0,1000,200,0,200
# Optional: Motion configuration
# NOTE: Can be overridden at the camera level
motion:
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255.
threshold: 30
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects.
# As a rule of thumb:
# - 10 - high sensitivity
# - 30 - medium sensitivity
# - 50 - low sensitivity
contour_area: 10
# Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
# Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
# Low values will cause things like moving shadows to be detected as motion for longer.
# https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
frame_alpha: 0.01
# Optional: Height of the resized motion frame (default: 100)
# Higher values will result in more granular motion detection at the expense of higher CPU usage.
# Lower values result in less CPU, but small changes may not register as motion.
frame_height: 100
# Optional: motion mask
# NOTE: see docs for more detailed info on creating masks
mask: 0,900,1080,900,1080,1920,0,1920
# Optional: improve contrast (default: shown below)
# Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive
# for daytime.
improve_contrast: True
# Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below).
mqtt_off_delay: 30
# Optional: Record configuration
# NOTE: Can be overridden at the camera level
record:
# Optional: Enable recording (default: shown below)
# WARNING: If recording is disabled in the config, turning it on via
# the UI or MQTT later will have no effect.
enabled: False
# Optional: Number of minutes to wait between cleanup runs (default: shown below)
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
expire_interval: 60
# Optional: Sync recordings with disk on startup and once a day (default: shown below).
sync_recordings: False
# Optional: Retention settings for recording
retain:
# Optional: Number of days to retain recordings regardless of events (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in events section below
# if you only want to retain recordings of events.
days: 0
# Optional: Mode for retention. Available options are: all, motion, and active_objects
# all - save all recording segments regardless of activity
# motion - save all recordings segments with any detected motion
# active_objects - save all recording segments with active/moving objects
# NOTE: this mode only applies when the days setting above is greater than 0
mode: all
# Optional: Recording Export Settings
export:
# Optional: Timelapse Output Args (default: shown below).
# NOTE: The default args are set to fit 24 hours of recording into 1 hour playback.
# See https://stackoverflow.com/a/58268695 for more info on how these args work.
# As an example: if you wanted to go from 24 hours to 30 minutes that would be going
# from 86400 seconds to 1800 seconds which would be 1800 / 86400 = 0.02.
# The -r (framerate) dictates how smooth the output video is.
# So the args would be -vf setpts=0.02*PTS -r 30 in that case.
timelapse_args: "-vf setpts=0.04*PTS -r 30"
# Optional: Recording Preview Settings
preview:
# Optional: Quality of recording preview (default: shown below).
# Options are: very_low, low, medium, high, very_high
quality: medium
# Optional: Event recording settings
events:
# Optional: Number of seconds before the event to include (default: shown below)
pre_capture: 5
# Optional: Number of seconds after the event to include (default: shown below)
post_capture: 5
# Optional: Objects to save recordings for. (default: all tracked objects)
objects:
- person
# Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones)
required_zones: []
# Optional: Retention settings for recordings of events
retain:
# Required: Default retention days (default: shown below)
default: 10
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for events regardless of activity
# motion - save all recordings segments for events with any detected motion
# active_objects - save all recording segments for event with active/moving objects
#
# NOTE: If the retain mode for the camera is more restrictive than the mode configured
# here, the segments will already be gone by the time this mode is applied.
# For example, if the camera retain mode is "motion", the segments without motion are
# never stored, so setting the mode to "all" here won't bring them back.
mode: motion
# Optional: Per object retention days
objects:
person: 15
# Optional: Configuration for the jpg snapshots written to the clips directory for each event
# NOTE: Can be overridden at the camera level
snapshots:
# Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
enabled: False
# Optional: save a clean PNG copy of the snapshot image (default: shown below)
clean_copy: True
# Optional: print a timestamp on the snapshots (default: shown below)
timestamp: False
# Optional: draw bounding box on the snapshots (default: shown below)
bounding_box: True
# Optional: crop the snapshot (default: shown below)
crop: False
# Optional: height to resize the snapshot to (default: original size)
height: 175
# Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones)
required_zones: []
# Optional: Camera override for retention settings (default: global values)
retain:
# Required: Default retention days (default: shown below)
default: 10
# Optional: Per object retention days
objects:
person: 15
# Optional: quality of the encoded jpeg, 0-100 (default: shown below)
quality: 70
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.8.4)
go2rtc:
# Optional: jsmpeg stream configuration for WebUI
live:
# Optional: Set the name of the stream that should be used for live view
# in frigate WebUI. (default: name of camera)
stream_name: camera_name
# Optional: Set the height of the jsmpeg stream. (default: 720)
# This must be less than or equal to the height of the detect stream. Lower resolutions
# reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio.
height: 720
# Optional: Set the encode quality of the jsmpeg stream (default: shown below)
# 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources.
quality: 8
# Optional: in-feed timestamp style configuration
# NOTE: Can be overridden at the camera level
timestamp_style:
# Optional: Position of the timestamp (default: shown below)
# "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right)
position: "tl"
# Optional: Format specifier conform to the Python package "datetime" (default: shown below)
# Additional Examples:
# german: "%d.%m.%Y %H:%M:%S"
format: "%m/%d/%Y %H:%M:%S"
# Optional: Color of font
color:
# All Required when color is specified (default: shown below)
red: 255
green: 255
blue: 255
# Optional: Line thickness of font (default: shown below)
thickness: 2
# Optional: Effect of lettering (default: shown below)
# None (No effect),
# "solid" (solid background in inverse color of font)
# "shadow" (shadow for font)
effect: None
# Required
cameras:
# Required: name of the camera
back:
# Optional: Enable/Disable the camera (default: shown below).
# If disabled: config is used but no live stream and no capture etc.
# Events/Recordings are still viewable.
enabled: True
# Required: ffmpeg settings for the camera
ffmpeg:
# Required: A list of input streams for the camera. See documentation for more information.
inputs:
# Required: the path to the stream
# NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {}
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
# Required: list of roles for this stream. valid values are: audio,detect,record
# NOTICE: In addition to assigning the audio, detect, and record roles
# they must also be enabled in the camera config.
roles:
- audio
- detect
- record
# Optional: stream specific global args (default: inherit)
# global_args:
# Optional: stream specific hwaccel args (default: inherit)
# hwaccel_args:
# Optional: stream specific input args (default: inherit)
# input_args:
# Optional: camera specific global args (default: inherit)
# global_args:
# Optional: camera specific hwaccel args (default: inherit)
# hwaccel_args:
# Optional: camera specific input args (default: inherit)
# input_args:
# Optional: camera specific output args (default: inherit)
# output_args:
# Optional: timeout for highest scoring image before allowing it
# to be replaced by a newer image. (default: shown below)
best_image_timeout: 60
# Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera.
webui_url: ""
# Optional: zones for this camera
zones:
# Required: name of the zone
# NOTE: This must be different than any camera names, but can match with another zone on another
# camera.
front_steps:
# Required: List of x,y coordinates to define the polygon of the zone.
# NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box.
coordinates: 545,1077,747,939,788,805
# Optional: Number of consecutive frames required for object to be considered present in the zone (default: shown below).
inertia: 3
# Optional: List of objects that can trigger this zone (default: all tracked objects)
objects:
- person
# Optional: Zone level object filters.
# NOTE: The global and camera filters are applied upstream.
filters:
person:
min_area: 5000
max_area: 100000
threshold: 0.7
# Optional: Configuration for the jpg snapshots published via MQTT
mqtt:
# Optional: Enable publishing snapshot via mqtt for camera (default: shown below)
# NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'.
# All other messages will still be published.
enabled: True
# Optional: print a timestamp on the snapshots (default: shown below)
timestamp: True
# Optional: draw bounding box on the snapshots (default: shown below)
bounding_box: True
# Optional: crop the snapshot (default: shown below)
crop: True
# Optional: height to resize the snapshot to (default: shown below)
height: 270
# Optional: jpeg encode quality (default: shown below)
quality: 70
# Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones)
required_zones: []
# Optional: Configuration for how camera is handled in the GUI.
ui:
# Optional: Adjust sort order of cameras in the UI. Larger numbers come later (default: shown below)
# By default the cameras are sorted alphabetically.
order: 0
# Optional: Whether or not to show the camera in the Frigate UI (default: shown below)
dashboard: True
# Optional: connect to ONVIF camera
# to enable PTZ controls.
onvif:
# Required: host of the camera being connected to.
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
# Optional: username for login.
# NOTE: Some devices require admin to access ONVIF.
user: admin
# Optional: password for login.
password: admin
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:
# Optional: enable/disable object autotracking. (default: shown below)
enabled: False
# Optional: calibrate the camera on startup (default: shown below)
# A calibration will move the PTZ in increments and measure the time it takes to move.
# The results are used to help estimate the position of tracked objects after a camera move.
# Frigate will update your config file automatically after a calibration with
# a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False.
calibrate_on_startup: False
# Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below)
# Available options are: disabled, absolute, and relative
# disabled - don't zoom in/out on autotracked objects, use pan/tilt only
# absolute - use absolute zooming (supported by most PTZ capable cameras)
# relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements)
zooming: disabled
# Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below)
# A lower value will keep more of the scene in view around a tracked object.
# A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly.
# The value should be between 0.1 and 0.75
zoom_factor: 0.3
# Optional: list of objects to track from labelmap.txt (default: shown below)
track:
- person
# Required: Begin automatically tracking an object when it enters any of the listed zones.
required_zones:
- zone_name
# Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below)
return_preset: home
# Optional: Seconds to delay before returning to preset. (default: shown below)
timeout: 10
# Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below)
movement_weights: []
# Optional: Configuration for how to sort the cameras in the Birdseye view.
birdseye:
# Optional: Adjust sort order of cameras in the Birdseye view. Larger numbers come later (default: shown below)
# By default the cameras are sorted alphabetically.
order: 0
# Optional
ui:
# Optional: Set the default live mode for cameras in the UI (default: shown below)
live_mode: mse
# Optional: Set a timezone to use in the UI (default: use browser local time)
# timezone: America/Denver
# Optional: Use an experimental recordings / camera view UI (default: shown below)
use_experimental: False
# Optional: Set the time format used.
# Options are browser, 12hour, or 24hour (default: shown below)
time_format: browser
# Optional: Set the date style for a specified length.
# Options are: full, long, medium, short
# Examples:
# short: 2/11/23
# medium: Feb 11, 2023
# full: Saturday, February 11, 2023
# (default: shown below).
date_style: short
# Optional: Set the time style for a specified length.
# Options are: full, long, medium, short
# Examples:
# short: 8:14 PM
# medium: 8:15:22 PM
# full: 8:15:22 PM Mountain Standard Time
# (default: shown below).
time_style: medium
# Optional: Ability to manually override the date / time styling to use strftime format
# https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html
# possible values are shown above (default: not set)
strftime_fmt: "%Y/%m/%d %H:%M"
# Optional: Telemetry configuration
telemetry:
# Optional: Enabled network interfaces for bandwidth stats monitoring (default: empty list, let nethogs search all)
network_interfaces:
- eth
- enp
- eno
- ens
- wl
- lo
# Optional: Configure system stats
stats:
# Enable AMD GPU stats (default: shown below)
amd_gpu_stats: True
# Enable Intel GPU stats (default: shown below)
intel_gpu_stats: True
# Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
network_bandwidth: False
# Optional: Enable the latest version outbound check (default: shown below)
# NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions
version_check: True
```

View File

@ -11,7 +11,7 @@ Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.8.4) to provide
:::note
You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which can be helpful to debug as well as provide useful information about your camera streams.
You can access the go2rtc stream info at `http://frigate_ip:5000/api/go2rtc/streams` which can be helpful to debug as well as provide useful information about your camera streams.
:::
@ -38,10 +38,6 @@ go2rtc:
**NOTE:** This does not apply to localhost requests, there is no need to provide credentials when using the restream as a source for frigate cameras.
## RTMP (Deprecated)
In previous Frigate versions RTMP was used for re-streaming. RTMP has disadvantages however including being incompatible with H.265, high bitrates, and certain audio codecs. RTMP is deprecated and it is recommended to move to the new restream role.
## Reduce Connections To Camera
Some cameras only support one active connection or you may just want to have a single connection open to the camera. The RTSP restream allows this to be possible.
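As a sketch, assuming a camera named `back` with a single RTSP stream, go2rtc can connect to the camera once and Frigate can consume the local restream for all of its roles (the URL and credentials are placeholders):

```yaml
go2rtc:
  streams:
    back:
      - rtsp://user:password@10.0.10.10:554/stream

cameras:
  back:
    ffmpeg:
      inputs:
        # Connect to the local restream instead of the camera directly
        - path: rtsp://127.0.0.1:8554/back
          input_args: preset-rtsp-restream
          roles:
            - detect
            - record
```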

View File

@ -23,6 +23,34 @@ NOTE: There is no way to disable stationary object tracking with this value.
`threshold` is the number of frames an object needs to remain relatively still before it is considered stationary.
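As a minimal sketch, both values can be tuned together; the numbers shown are illustrative:

```yaml
detect:
  stationary:
    # Re-verify stationary objects every 50th frame
    interval: 50
    # Frames without a position change before an object is considered stationary
    threshold: 50
```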
## Avoiding stationary objects
## Handling stationary objects
In some cases, like a driveway, you may prefer to only have an event when a car is coming & going vs a constant event of it stationary in the driveway. [This docs sections](../guides/stationary_objects.md) explains how to approach that scenario.
In some cases, like a driveway, you may prefer to only have an event when a car is coming & going vs a constant event of it stationary in the driveway. You can reference [this guide](../guides/parked_cars.md) for recommended approaches.
## Why does Frigate track stationary objects?
Frigate didn't always track stationary objects. In fact, it didn't even track objects at all initially.
Let's look at an example use case: I want to record any cars that enter my driveway.
One might simply think "Why not just run object detection any time there is motion around the driveway area and notify if the bounding box is in that zone?"
With that approach, what video is related to the car that entered the driveway? Did it come from the left or right? Was it parked across the street for an hour before turning into the driveway? One approach is to just record 24/7 or for motion (on any changed pixels) and not attempt to do that at all. This is what most other NVRs do. Just don't even try to identify a start and end for that object, since it's hard and you will be wrong some portion of the time.
Couldn't you just look at when motion stopped and started? Motion for a video feed is nothing more than looking for pixels that are different than they were in previous frames. If the car entered the driveway while someone was mowing the grass, how would you know which motion was for the car and which was for the person as they mow along the driveway or street? What if another car was driving the other direction on the street? Or what if it's a windy day and the bush by your mailbox is blowing around?
In order to do it more accurately, you need to identify objects and track them with a unique id. In each subsequent frame, everything has moved a little and you need to determine which bounding boxes go with each object from the previous frame.
Tracking objects across frames is a challenging problem, especially if you want to do it in real time. There are entire competitions for research algorithms to see which of them can do it most accurately. None of them are accurate 100% of the time, not even the ones that run slower than real time. There is always an error rate in the algorithm.
Now consider that the car is driving down a street that has other cars parked along it. It will drive behind some of these cars and in front of others. There may even be a car driving the opposite direction.
Let's assume for now that we are NOT already tracking two parked cars on the street or the car parked in the driveway, ie, there is no stationary object tracking.
As the car you are tracking approaches an area with 2 cars parked, the headlights reflect off the parked cars and the car parked in your driveway. The pixel values are different in that area, so there is motion detected. Object detection runs and identifies the remaining 3 cars. In the previous frame, you had a single bounding box from the car you are tracking. Now you have 4. The original object, the 2 cars on the street and the one in your driveway.
Now you have to determine which of the bounding boxes in this frame should be matched to the tracking id from the previous frame where you only had one. Remember, you have never seen these additional 3 cars before, so you know nothing about them. On top of that the bounding box for the car you are tracking has now moved to a new location, so which of the 4 belongs to the car you were originally tracking? The algorithms here are fairly good. They use a Kalman filter to predict the next location of an object using the historical bounding boxes and the bounding box closest to the predicted location is linked. It's right sometimes, but the error rate is going to be high when there are 4 possible bounding boxes.
Now let's assume that those other 3 cars were already being tracked as stationary objects, so the car driving down the street is a new 4th car. The object tracker knows we have had 3 cars and we now have 4. As the new car approaches the parked cars, the bounding boxes for all 4 cars are predicted based on the previous frames. The predicted boxes for the parked cars are nearly a 100% overlap with the bounding boxes in the new frame. The parked cars are slam dunk matches to the tracking ids they had before, and the only one left is the remaining bounding box, which gets assigned to the new car. This results in a much lower error rate. Not perfect, but better.
The most difficult scenario that causes IDs to be assigned incorrectly is when an object completely occludes another object. When a car drives in front of another car and it's no longer visible, a bounding box disappears and it's a bit of a toss-up when assigning the id, since it's difficult to know which one is in front of the other. This happens fairly often for cars passing in front of other cars. It's something that we want to improve in the future.

View File

@ -5,6 +5,9 @@ title: Zones
Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Presence in a zone is evaluated based on the bottom center of the bounding box for the object. It does not matter how much of the bounding box overlaps with the zone.
For example, the cat in this image is currently in Zone 1, but **not** Zone 2.
![bottom center](/img/bottom-center.jpg)
Zones cannot have the same name as a camera. If you have multiple cameras covering the same area, a single zone can span all of them by configuring a zone with the same name on each camera.
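A sketch of a shared zone across two cameras; the camera names and coordinates are placeholders:

```yaml
cameras:
  front_cam:
    zones:
      shared_yard:
        coordinates: 545,1077,747,939,788,805
  side_cam:
    zones:
      # Same zone name on both cameras makes this a single logical zone
      shared_yard:
        coordinates: 545,1077,747,939,788,805
```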
During testing, enable the Zones option for the debug feed so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.

View File

@ -0,0 +1,58 @@
---
id: glossary
title: Glossary
---
The glossary explains terms commonly used in Frigate's documentation.
## Bounding Box
A box returned from the object detection model that outlines an object in the frame. These have multiple colors depending on object type in the debug live view.
## Event
The time period starting when a tracked object entered the frame and ending when it left the frame, including any time that the object remained still. Events are saved when the object is considered a [true positive](#threshold) and meets the requirements for a snapshot or recording to be saved.
## False Positive
An incorrect detection of an object type. For example a dog being detected as a person, a chair being detected as a dog, etc. A person being detected in an area you want to ignore is not a false positive.
## Mask
There are two types of masks in Frigate. [See the mask docs for more info](/configuration/masks)
### Motion Mask
Motion masks prevent detection of [motion](#motion) in masked areas from triggering Frigate to run object detection, but do not prevent objects from being detected if object detection runs due to motion in nearby areas. For example: camera timestamps, skies, the tops of trees, etc.
### Object Mask
Object filter masks drop any bounding boxes where the bottom center (overlap doesn't matter) is in the masked area. It forces them to be considered a [false positive](#false-positive) so that they are ignored.
## Min Score
The lowest score that an object can be detected with during tracking; any detection with a lower score will be assumed to be a false positive.
## Motion
Motion is detected when pixels in the current camera frame are different than previous frames. When many nearby pixels are different in the current frame, they are grouped together and indicated with a red motion box in the live debug view. [See the motion detection docs for more info](/configuration/motion_detection)
## Region
A portion of the camera frame that is sent to object detection. Regions can be sent due to motion, active objects, or occasionally for stationary objects. These are represented by green boxes in the debug live view.
## Snapshot Score
The score shown in a snapshot is the score of that object at that specific moment in time.
## Threshold
The threshold is the median score that an object must reach in order to be considered a true positive.
## Top Score
The top score for an object is the highest median score for an object.
## Zone
Zones are areas of interest; they can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones)

View File

@ -21,13 +21,12 @@ I may earn a small commission for my endorsement, recommendation, testimonial, o
## Server
My current favorite is the Minisforum GK41 because of the dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
My current favorite is the Beelink EQ12 because of the efficient N100 CPU and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with an M.2 or PCIe slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
| Odyssey X86 Blue J4125 (<a href="https://amzn.to/3oH4BKi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) (<a href="https://www.seeedstudio.com/Frigate-NVR-with-Odyssey-Blue-and-Coral-USB-Accelerator.html?utm_source=Frigate" target="_blank" rel="nofollow noopener sponsored">SeeedStudio</a>) | 9-10ms | M.2 B+M, USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Minisforum GK41 (<a href="https://amzn.to/3ptnb8D" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Intel NUC (<a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
| Beelink EQ12 (<a href="https://amzn.to/3OlTMJY" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Intel NUC (<a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
## Detectors
@ -98,6 +97,7 @@ Inference speed will vary depending on the YOLO model, jetson platform and jetso
#### Rockchip SoC
Frigate supports SBCs with the following Rockchip SoCs:
- RK3566/RK3568
- RK3588/RK3588S
- RV1103/RV1106

View File

@ -47,6 +47,12 @@ services:
...
```
:::caution
Users of the Snapcraft build of Docker cannot use storage locations outside their $HOME folder.
:::
### Calculating required shm-size
Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**.
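As an illustration, `shm_size` can be raised in a Docker Compose file; the value below is only an example, so size it for your own camera count and detect resolutions:

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    # Example only; the 64MB default is too small for most multi-camera setups
    shm_size: "256mb"
```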
@ -289,7 +295,6 @@ docker run \
--network=bridge \
--privileged \
--workdir=/opt/frigate \
-p 1935:1935 \
-p 5000:5000 \
-p 8554:8554 \
-p 8555:8555 \

View File

@ -9,7 +9,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
- Live stream support for cameras in Home Assistant Integration
- RTSP (instead of RTMP) relay for use with other consumers to reduce the number of connections to your camera streams
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
# Setup a go2rtc stream

View File

@ -327,9 +327,15 @@ cameras:
By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/index.md#full-configuration-reference).
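A minimal sketch for changing that retention, shown at the global level:

```yaml
snapshots:
  enabled: True
  retain:
    # Days to keep snapshots for all tracked objects
    default: 10
```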
### Step 7: Follow up guides
### Step 7: Complete config
Now that you have a working install, you can use the following guides for additional features:
At this point you have a complete config with basic functionality. You can see the [full config reference](../configuration/reference.md) for a complete list of configuration options.
1. [Configuring go2rtc](configuring_go2rtc) - Additional live view options and RTSP relay
### Follow up
Now that you have a working install, you can use the following documentation for additional features:
1. [Configuring go2rtc](configuring_go2rtc.md) - Additional live view options and RTSP relay
2. [Home Assistant Integration](../integrations/home-assistant.md) - Integrate with Home Assistant
3. [Masks](../configuration/masks.md)
4. [Zones](../configuration/zones.md)

View File

@ -1,9 +1,9 @@
---
id: ha_network_storage
title: HA Network Storage
title: Home Assistant network storage
---
As of HomeAsisstant Core 2023.6, Network Mounted Storage is supported for addons.
As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons.
## Setting Up Remote Storage For Frigate
@ -16,6 +16,7 @@ As of HomeAsisstant Core 2023.6, Network Mounted Storage is supported for addons
1. Stop the Frigate addon
2. Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding:
```yaml
database:
path: /config/frigate.db

View File

@ -0,0 +1,71 @@
---
id: parked_cars
title: Handling parked cars
---
:::tip
This is an area targeted for improvement in future releases.
:::
Many people use Frigate to detect cars entering their driveway, and they often run into an issue with repeated events of parked cars and/or long running events after the car parks. This can cause Frigate to store more video than desired.
:::caution
It is not recommended to use motion masks to try and eliminate parked cars in your driveway. Motion masks are designed to prevent motion from triggering object detection and will not prevent objects from being detected in the area if motion is detected outside of the motion mask.
:::
## Repeated events of parked cars
To only be notified of cars that enter your driveway from the street, you can create multiple zones that cover your driveway. For cars, you would only notify if `entered_zones` from the events MQTT topic contains the entrance zone.
See [this example](../configuration/zones.md#restricting-zones-to-specific-objects) from the Zones documentation to see how to restrict zones to certain object types.
![Driveway Zones](/img/driveway_zones-min.png)
To limit snapshots and events, you can list the zone for the entrance of your driveway under `required_zones` in your configuration file.
```yaml
camera:
record:
events:
required_zones:
- zone_2
zones:
zone_1:
coordinates: ... (parking area)
zone_2:
coordinates: ... (entrance to driveway)
```
This will only save events if the car entered the entrance zone at any point.
## Long running events
There are a few recommended approaches to avoid excessive storage use due to parked cars. These can be used in combination.
### 1. Use `motion` or `active_objects` mode for event recordings
Leverages [recording settings](../configuration/record.md#what-do-the-different-retain-modes-mean) to avoid excess storage use.
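A sketch of that configuration, keeping only segments with active objects for event recordings; the retention period is illustrative:

```yaml
record:
  enabled: True
  events:
    retain:
      default: 30
      # Only keep segments where the tracked object was actually moving
      mode: active_objects
```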
#### Advantages of this approach
For users using `motion` mode for continuous recording, this avoids storing extra video for cars parked in view, because all motion video is already being saved.
#### Limitations of this approach
For users that only want to record motion during events, long running events will result in all motion being stored as long as the car is in view. You can mitigate this further by using the `active_objects` mode for event recordings, but that may result in less video being retained than is desired.
### 2. Use an object mask to prevent detections in the parking zone
Leverages [object filter masks](../configuration/masks.md#object-filter-masks) to prevent detections of cars parked in the driveway.
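A sketch, assuming a camera named `driveway`; the mask coordinates are placeholders that should outline your parking area:

```yaml
cameras:
  driveway:
    objects:
      filters:
        car:
          # Drop car detections whose bottom center falls in the parking area
          mask: 0,0,1000,0,1000,200,0,200
```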
#### Advantages of this approach
Using this approach, you will get two separate events for when a car enters the driveway, parks in the parking zone, and then later leaves the zone. Using an object mask will ensure that cars parked in the parking zone are not detected and confused with cars driving by on the street as well.
#### Limitations of this approach
This approach will only work for cars that park in the parking zone. Cars that park in other areas will still be tracked as long as they are in view. This will also prevent zone sensors from reporting when a car is parked in the parking zone.

View File

@ -1,43 +0,0 @@
---
id: stationary_objects
title: Avoiding stationary objects
---
Many people use Frigate to detect cars entering their driveway, and they often run into an issue with repeated notifications or events of a parked car being repeatedly detected over the course of multiple days (for example if the car is lost at night and detected again the following morning).
You can use zones to restrict events and notifications to objects that have entered specific areas.
:::caution
It is not recommended to use masks to try and eliminate parked cars in your driveway. Masks are designed to prevent motion from triggering object detection and/or to indicate areas that are guaranteed false positives.
Frigate is designed to track objects as they move and over-masking can prevent it from knowing that an object in the current frame is the same as the previous frame. You want Frigate to detect objects everywhere and configure your events and alerts to be based on the location of the object with zones.
:::
:::info
Once a vehicle crosses the entrance into the parking area, that event will stay `In Progress` until it is no longer seen in the frame. Frigate is designed to have an event last as long as an object is visible in the frame; an event being `In Progress` does not mean the event is being constantly recorded. You can define the recording behavior by adjusting the [recording retention settings](../configuration/record.md).
:::
To only be notified of cars that enter your driveway from the street, you could create multiple zones that cover your driveway. For cars, you would only notify if `entered_zones` from the events MQTT topic contains more than one zone.
See [this example](../configuration/zones.md#restricting-zones-to-specific-objects) from the Zones documentation to see how to restrict zones to certain object types.
![Driveway Zones](/img/driveway_zones-min.png)
To limit snapshots and events, you can list the zone for the entrance of your driveway under `required_zones` in your configuration file. Example below.
```yaml
camera:
record:
events:
required_zones:
- zone_2
zones:
zone_1:
coordinates: ... (parking area)
zone_2:
coordinates: ... (entrance to driveway)
```

View File

@ -124,10 +124,6 @@ https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
<a name="streams"></a>
## RTMP stream
RTMP is deprecated and it is recommended to switch to RTSP restreams.
## RTSP stream
In order for the live streams to function they need to be accessible on the RTSP

View File

@ -37,11 +37,13 @@ Snapshots must be enabled to be able to submit examples to Frigate+
:::
![Send To Plus](/img/send-to-plus.png)
![Send To Plus](/img/plus/send-to-plus.jpg)
![Submit To Plus](/img/plus/submit-to-plus.jpg)
### Annotate and verify
You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image.
You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [improving your model](../plus/improving_model.md).
![Annotate](/img/annotate.png)
@ -56,7 +58,7 @@ model:
Models are downloaded into the `/config/model_cache` folder and only downloaded if needed.
You can override the labelmap for Frigate+ models like this:
If needed, you can override the labelmap for Frigate+ models. This is not recommended as renaming labels will break the Submit to Frigate+ feature if the labels are not available in Frigate+.
```yaml
model:

28
docs/docs/plus/faq.md Normal file
View File

@ -0,0 +1,28 @@
---
id: faq
title: FAQ
---
### Are my models trained just on my image uploads? How are they built?
Frigate+ models are built by fine tuning a base model with the images you have annotated and verified. The base model is trained from scratch from a sampling of images across all Frigate+ user submissions and takes weeks of expensive GPU resources to train. If the models were built using your image uploads alone, you would need to provide tens of thousands of examples and it would take more than a week (and considerable cost) to train. Diversity helps the model generalize.
### What is a training credit and how do I use them?
Essentially, `1 training credit = 1 trained model`. When you have uploaded, annotated, and verified additional images and you are ready to train your model, you will submit a model request which will use one credit. The model that is trained will utilize all of the verified images in your account. When new base models are available, it will require the use of a training credit to generate a new user model on the new base model.
### Are my video feeds sent to the cloud for analysis when using Frigate+ models?
No. Frigate+ models are a drop in replacement for the default model. All processing is performed locally as always. The only images sent to Frigate+ are the ones you specifically submit via the `Send to Frigate+` button or upload directly.
### Can I label anything I want and train the model to recognize something custom for me?
Not currently. At the moment, the set of labels will be consistent for all users. The focus will be on expanding that set of labels before working on completely custom user labels.
### Can Frigate+ models be used offline?
Yes. Models and metadata are stored in the `model_cache` directory within the config folder. Frigate will only attempt to download a model if it does not exist in the cache. This means you can backup the directory and/or use it completely offline.
### Can I keep using my Frigate+ models even if I do not renew my subscription?
Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained using the training credits that you purchased are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.

View File

@ -0,0 +1,63 @@
---
id: first_model
title: Requesting your first model
---
## Step 1: Upload and annotate your images
Before requesting your first model, you will need to upload at least 10 images to Frigate+. But for the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
For more detailed recommendations, you can refer to the docs on [improving your model](./improving_model.md).
## Step 2: Submit a model request
Once you have an initial set of verified images, you can request a model on the Models page. Each model request requires 1 of the training credits that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
![Plus Models Page](/img/plus/plus-models.jpg)
## Step 3: Set your model id in the config
You will receive an email notification when your Frigate+ model is ready.
![Model Ready Email](/img/plus/model-ready-email.jpg)
Models available in Frigate+ can be used with a special model path. No other information needs to be configured because Frigate fetches the remaining config from Frigate+ automatically.
```yaml
model:
path: plus://<your_model_id>
```
## Step 4: Adjust your object filters for higher scores
Frigate+ models generally have much higher scores than the default model provided in Frigate. You will likely need to increase your `threshold` and `min_score` values. Here is an example of how these values can be refined, but you should expect these to evolve as your model improves. For more information about how `threshold` and `min_score` are related, see the docs on [object filters](../configuration/object_filters.md#object-scores).
```yaml
objects:
filters:
dog:
min_score: .7
threshold: .9
cat:
min_score: .65
threshold: .8
face:
min_score: .7
package:
min_score: .65
threshold: .9
license_plate:
min_score: .6
amazon:
min_score: .75
ups:
min_score: .75
fedex:
min_score: .75
person:
min_score: .65
threshold: .85
car:
min_score: .65
threshold: .85
```

View File

@ -0,0 +1,33 @@
---
id: improving_model
title: Improving your model
---
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. Because a limited number of users submitted images to Frigate+ prior to this launch, you may need to submit several hundred images per camera to see good results. With all the new images now being submitted, future base models will improve as more and more users (including you) submit examples to Frigate+. Note that only verified images will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range (see the sketch after this list). This will help the model learn and widen the gap between true and false positive scores.
- **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
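For example, a temporary config change along these lines (the object and values are illustrative, not recommendations) surfaces borderline detections so they can be submitted:

```yaml
objects:
  filters:
    car:
      # temporarily lowered (e.g. from 0.70) to surface detections scoring 65-72%
      threshold: 0.65
```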
## Properly labeling images
For the best results, follow these guidelines.
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car, for example, the model will be taught that part of the image is _not_ a car, and it will start to get confused.
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
**Label the full object even when occluded**: If you have a person standing behind a car, label the full person even though a portion of their body may be hidden behind the car. This helps predict accurate bounding boxes and improves zone accuracy and filters at runtime.
**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
![Fedex Logo](/img/plus/fedex-logo.jpg)
## False positive labels
False positives will be shown with a red box and the label will have a strikethrough.
![false positive](/img/plus/false-positive.jpg)
Misidentified objects should have a correct label added. For example, if a person was mistakenly detected as a cat, you should submit it as a false positive in Frigate and add a label for the person. The boxes will overlap.
![add image](/img/plus/false-positive-overlap.jpg)

View File

@ -1,115 +1,37 @@
---
id: index
title: Models Guide
title: Models
---
Frigate+ offers models trained from scratch and specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with fewer resources. By uploading your own labeled examples, your model is tuned for accuracy in your specific conditions. After tuning, performance is evaluated against a broad dataset and real world examples submitted by other Frigate+ users to prevent overfitting.
<a href="https://plus.frigate.video" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with fewer resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
With a subscription, and at each annual renewal, you will receive 12 model training credits that can be used to train tuned models. If you cancel your subscription, you will keep your existing credits and retain access to any trained models. Users with an active subscription can purchase additional training credits for $5 each.
:::info
Information on how to integrate Frigate+ with Frigate can be found in the [integrations docs](/integrations/plus).
The baseline model isn't directly available after subscribing. This may change in the future, but for now you will need to submit a model request with the minimum number of images.
## Improving your model
:::
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. Because a limited number of users submitted images to Frigate+ prior to this launch, you may need to submit several hundred images per camera to see good results. With all the new images now being submitted, future base models will improve as more and more users (including you) submit examples to Frigate+.
With a subscription, and at each annual renewal, you will receive 12 model training credits. If you cancel your subscription, you will retain access to any trained models. An active subscription is required to submit model requests or purchase additional training credits.
False positives can be reduced by submitting **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).
You may find that it's helpful to lower your thresholds a little in order to generate more false/true positives near the threshold value. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
## Supported detector types
Note that only verified images will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
:::warning
In order to request your first model, you will need to have annotated and verified at least 10 images. Each subsequent model request will require that 10 additional images are verified. However, this is the bare minimum. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night.
Frigate+ models are not supported for TensorRT or OpenVino yet.
As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
## Properly labeling images
For the best results, follow these guidelines.
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car, for example, the model will be taught that part of the image is _not_ a car, and it will start to get confused.
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
**Label the full object even when occluded**: If you have a person standing behind a car, label the full person even though a portion of their body may be hidden behind the car. This helps predict accurate bounding boxes and improves zone accuracy and filters at runtime.
**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
![Fedex Logo](/img/plus/fedex-logo.jpg)
## Frequently asked questions
### Are my models trained just on my image uploads? How are they built?
Frigate+ models are built by fine tuning a base model with the images you have annotated and verified. The base model is trained from scratch from a sampling of images across all Frigate+ user submissions and takes weeks of expensive GPU resources to train. If the models were built using your image uploads alone, you would need to provide tens of thousands of examples and it would take more than a week (and considerable cost) to train. Diversity helps the model generalize.
### What is a training credit and how do I use them?
Essentially, `1 training credit = 1 trained model`. When you have uploaded, annotated, and verified additional images and you are ready to train your model, you will submit a model request which will use one credit. The model that is trained will utilize all of the verified images in your account. When new base models are available, it will require the use of a training credit to generate a new user model on the new base model.
### Are my video feeds sent to the cloud for analysis when using Frigate+ models?
No. Frigate+ models are a drop in replacement for the default model. All processing is performed locally as always. The only images sent to Frigate+ are the ones you specifically submit via the `Send to Frigate+` button or upload directly.
### Can I label anything I want and train the model to recognize something custom for me?
Not currently. At the moment, the set of labels will be consistent for all users. The focus will be on expanding that set of labels before working on completely custom user labels.
### Can Frigate+ models be used offline?
Yes. Models and metadata are stored in the `model_cache` directory within the config folder. Frigate will only attempt to download a model if it does not exist in the cache. This means you can backup the directory and/or use it completely offline.
### Can I keep using my Frigate+ models even if I do not renew my subscription?
Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained using the training credits that you purchased are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.
## Important model information
### Supported Model Types
:::
Currently, Frigate+ models only support CPU (`cpu`) and Coral (`edgetpu`) detectors. OpenVino is next in line to gain support.
The models are created using the same MobileDet architecture as the default model. Additional architectures will be added in future releases as needed.
### Higher Scores
Frigate+ models generally have much higher scores than the default model provided in Frigate. You will likely need to increase your `threshold` and `min_score` values. Here is an example of how these values can be refined, but you should expect these to evolve as your model improves:
```yaml
objects:
filters:
dog:
min_score: .7
threshold: .9
cat:
min_score: .65
threshold: .8
face:
min_score: .7
package:
min_score: .65
threshold: .9
license_plate:
min_score: .6
amazon:
min_score: .75
ups:
min_score: .75
fedex:
min_score: .75
person:
min_score: .65
threshold: .85
car:
min_score: .65
threshold: .85
```
### Available label types
## Available label types
Frigate+ models support a more relevant set of objects for security cameras. Currently, only the following objects are supported: `person`, `face`, `car`, `license_plate`, `amazon`, `ups`, `fedex`, `package`, `dog`, `cat`, `deer`. Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
#### Label attributes
### Label attributes
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate events. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
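For example (values are illustrative), attribute labels are tuned through `min_score` alone:

```yaml
objects:
  filters:
    license_plate:
      min_score: 0.6 # threshold is ignored for attribute labels
    face:
      min_score: 0.7
```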

View File

@ -6,19 +6,20 @@ module.exports = {
"frigate/installation",
"frigate/camera_setup",
"frigate/video_pipeline",
"frigate/glossary",
],
Guides: [
"guides/getting_started",
"guides/configuring_go2rtc",
"guides/false_positives",
"guides/ha_notifications",
"guides/ha_network_storage",
"guides/stationary_objects",
"guides/parked_cars",
"guides/reverse_proxy",
],
Configuration: {
"Configuration Files": [
"configuration/index",
"configuration/reference",
{
type: "link",
label: "Go2RTC Configuration Reference",
@ -41,10 +42,11 @@ module.exports = {
"configuration/camera_specific",
],
Objects: [
"configuration/object_filters",
"configuration/masks",
"configuration/zones",
"configuration/objects",
"configuration/stationary_objects",
"configuration/zones",
],
"Extra Configuration": [
"configuration/hardware_acceleration",
@ -59,7 +61,12 @@ module.exports = {
"integrations/mqtt",
"integrations/third_party_extensions",
],
"Frigate+": ["plus/index"],
"Frigate+": [
"plus/index",
"plus/first_model",
"plus/improving_model",
"plus/faq",
],
Troubleshooting: [
"troubleshooting/faqs",
"troubleshooting/recordings",

BIN docs/static/img/bottom-center-mask.jpg vendored Normal file (added, 58 KiB; binary file not shown)
BIN docs/static/img/bottom-center.jpg vendored Normal file (added, 115 KiB; binary file not shown)
BIN (unnamed binary file, added, 22 KiB; not shown)
BIN docs/static/img/plus/false-positive.jpg vendored Normal file (added, 45 KiB; binary file not shown)
BIN (unnamed binary file, added, 33 KiB; not shown)
BIN docs/static/img/plus/plus-models.jpg vendored Normal file (added, 71 KiB; binary file not shown)
BIN docs/static/img/plus/send-to-plus.jpg vendored Normal file (added, 57 KiB; binary file not shown)
BIN docs/static/img/plus/submit-to-plus.jpg vendored Normal file (added, 63 KiB; binary file not shown)
BIN (unnamed binary file, removed, 39 KiB; not shown)

View File

@ -37,10 +37,17 @@ from frigate.events.external import ExternalEventProcessor
from frigate.events.maintainer import EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event, Recordings, RecordingsToDelete, Regions, Timeline
from frigate.models import (
Event,
Previews,
Recordings,
RecordingsToDelete,
Regions,
Timeline,
)
from frigate.object_detection import ObjectDetectProcess
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.output.output import output_frames
from frigate.plus import PlusApi
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.ptz.onvif import OnvifController
@ -271,6 +278,7 @@ class FrigateApp:
def init_database(self) -> None:
def vacuum_db(db: SqliteExtDatabase) -> None:
logger.info("Running database vacuum")
db.execute_sql("VACUUM;")
try:
@ -368,7 +376,7 @@ class FrigateApp:
60, 10 * len([c for c in self.config.cameras.values() if c.enabled])
),
)
models = [Event, Recordings, RecordingsToDelete, Regions, Timeline]
models = [Event, Recordings, RecordingsToDelete, Previews, Regions, Timeline]
self.db.bind(models)
def init_stats(self) -> None:
@ -487,6 +495,7 @@ class FrigateApp:
args=(
self.config,
self.video_output_queue,
self.inter_process_queue,
self.camera_metrics,
),
)

View File

@ -5,8 +5,8 @@ from abc import ABC, abstractmethod
from typing import Any, Callable
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import INSERT_MANY_RECORDINGS, REQUEST_REGION_GRID
from frigate.models import Recordings
from frigate.const import INSERT_MANY_RECORDINGS, INSERT_PREVIEW, REQUEST_REGION_GRID
from frigate.models import Previews, Recordings
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes
from frigate.util.object import get_camera_regions_grid
@ -102,6 +102,8 @@ class Dispatcher:
max(self.config.model.width, self.config.model.height),
)
)
elif topic == INSERT_PREVIEW:
Previews.insert(payload).execute()
else:
self.publish(topic, payload, retain=False)

View File

@ -30,7 +30,6 @@ from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_scale,
parse_preset_input,
parse_preset_output_record,
parse_preset_output_rtmp,
)
from frigate.plus import PlusApi
from frigate.util.builtin import (
@ -40,7 +39,7 @@ from frigate.util.builtin import (
load_config_with_no_duplicates,
)
from frigate.util.image import create_mask
from frigate.util.services import get_video_properties
from frigate.util.services import auto_detect_hwaccel, get_video_properties
logger = logging.getLogger(__name__)
@ -260,6 +259,20 @@ class RecordExportConfig(FrigateBaseModel):
)
class RecordQualityEnum(str, Enum):
very_low = "very_low"
low = "low"
medium = "medium"
high = "high"
very_high = "very_high"
class RecordPreviewConfig(FrigateBaseModel):
quality: RecordQualityEnum = Field(
default=RecordQualityEnum.medium, title="Quality of recording preview."
)
class RecordConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
sync_recordings: bool = Field(
@ -278,6 +291,9 @@ class RecordConfig(FrigateBaseModel):
export: RecordExportConfig = Field(
default_factory=RecordExportConfig, title="Recording Export Config"
)
preview: RecordPreviewConfig = Field(
default_factory=RecordPreviewConfig, title="Recording Preview Config"
)
enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of recording."
)
@ -565,7 +581,6 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-pix_fmt",
"yuv420p",
]
RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-rtmp-generic"
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"
@ -578,10 +593,6 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.",
)
rtmp: Union[str, List[str]] = Field(
default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="RTMP role FFmpeg output arguments.",
)
class FfmpegConfig(FrigateBaseModel):
@ -589,7 +600,7 @@ class FfmpegConfig(FrigateBaseModel):
default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
)
hwaccel_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg hardware acceleration arguments."
default="auto", title="FFmpeg hardware acceleration arguments."
)
input_args: Union[str, List[str]] = Field(
default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
@ -607,7 +618,6 @@ class FfmpegConfig(FrigateBaseModel):
class CameraRoleEnum(str, Enum):
audio = "audio"
record = "record"
rtmp = "rtmp"
detect = "detect"
@ -716,10 +726,6 @@ class CameraMqttConfig(FrigateBaseModel):
)
class RtmpConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="RTMP restreaming enabled.")
class CameraLiveConfig(FrigateBaseModel):
stream_name: str = Field(default="", title="Name of restream to use as live view.")
height: int = Field(default=720, title="Live camera view height")
@ -755,9 +761,6 @@ class CameraConfig(FrigateBaseModel):
record: RecordConfig = Field(
default_factory=RecordConfig, title="Record configuration."
)
rtmp: RtmpConfig = Field(
default_factory=RtmpConfig, title="RTMP restreaming configuration."
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
)
@ -802,7 +805,6 @@ class CameraConfig(FrigateBaseModel):
# add roles to the input if there is only one
if len(config["ffmpeg"]["inputs"]) == 1:
has_rtmp = "rtmp" in config["ffmpeg"]["inputs"][0].get("roles", [])
has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
config["ffmpeg"]["inputs"][0]["roles"] = [
@ -813,9 +815,6 @@ class CameraConfig(FrigateBaseModel):
if has_audio:
config["ffmpeg"]["inputs"][0]["roles"].append("audio")
if has_rtmp:
config["ffmpeg"]["inputs"][0]["roles"].append("rtmp")
super().__init__(**config)
@property
@ -855,15 +854,7 @@ class CameraConfig(FrigateBaseModel):
)
ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled:
rtmp_args = get_ffmpeg_arg_list(
parse_preset_output_rtmp(self.ffmpeg.output_args.rtmp)
or self.ffmpeg.output_args.rtmp
)
ffmpeg_output_args = (
rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
)
if "record" in ffmpeg_input.roles and self.record.enabled:
record_args = get_ffmpeg_arg_list(
parse_preset_output_record(self.ffmpeg.output_args.record)
@ -950,11 +941,6 @@ def verify_config_roles(camera_config: CameraConfig) -> None:
f"Camera {camera_config.name} has record enabled, but record is not assigned to an input."
)
if camera_config.rtmp.enabled and "rtmp" not in assigned_roles:
raise ValueError(
f"Camera {camera_config.name} has rtmp enabled, but rtmp is not assigned to an input."
)
if camera_config.audio.enabled and "audio" not in assigned_roles:
raise ValueError(
f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
@ -1065,9 +1051,6 @@ class FrigateConfig(FrigateBaseModel):
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Global snapshots configuration."
)
rtmp: RtmpConfig = Field(
default_factory=RtmpConfig, title="Global RTMP restreaming configuration."
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
)
@ -1114,6 +1097,10 @@ class FrigateConfig(FrigateBaseModel):
elif config.objects.filters[attribute].min_score == 0.5:
config.objects.filters[attribute].min_score = 0.7
# auto detect hwaccel args
if config.ffmpeg.hwaccel_args == "auto":
config.ffmpeg.hwaccel_args = auto_detect_hwaccel()
# Global config to propagate down to camera level
global_config = config.dict(
include={
@ -1121,7 +1108,6 @@ class FrigateConfig(FrigateBaseModel):
"birdseye": ...,
"record": ...,
"snapshots": ...,
"rtmp": ...,
"live": ...,
"objects": ...,
"motion": ...,
@ -1255,11 +1241,6 @@ class FrigateConfig(FrigateBaseModel):
verify_zone_objects_are_tracked(camera_config)
verify_autotrack_zones(camera_config)
if camera_config.rtmp.enabled:
logger.warning(
"RTMP restream is deprecated in favor of the restream role, recommend disabling RTMP."
)
# generate the ffmpeg commands
camera_config.create_ffmpeg_cmds()
config.cameras[name] = camera_config

View File

@ -35,6 +35,11 @@ AUDIO_MAX_BIT_RANGE = 32768.0
AUDIO_SAMPLE_RATE = 16000
AUDIO_MIN_CONFIDENCE = 0.5
# Ffmpeg Presets
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
# Regex Consts
REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
@ -59,6 +64,7 @@ MAX_PLAYLIST_SECONDS = 7200 # support 2 hour segments for a single playlist to
# Internal Comms Topics
INSERT_MANY_RECORDINGS = "insert_many_recordings"
INSERT_PREVIEW = "insert_preview"
REQUEST_REGION_GRID = "request_region_grid"
# Autotracking
@ -67,6 +73,6 @@ AUTOTRACKING_MAX_AREA_RATIO = 0.6
AUTOTRACKING_MOTION_MIN_DISTANCE = 20
AUTOTRACKING_MOTION_MAX_POINTS = 500
AUTOTRACKING_MAX_MOVE_METRICS = 500
AUTOTRACKING_ZOOM_OUT_HYSTERESIS = 1.2
AUTOTRACKING_ZOOM_IN_HYSTERESIS = 0.9
AUTOTRACKING_ZOOM_OUT_HYSTERESIS = 1.1
AUTOTRACKING_ZOOM_IN_HYSTERESIS = 0.95
AUTOTRACKING_ZOOM_EDGE_THRESHOLD = 0.05

View File

@ -49,12 +49,18 @@ class DeepStack(DetectionApi):
image.save(output, format="JPEG")
image_bytes = output.getvalue()
data = {"api_key": self.api_key}
response = requests.post(
self.api_url,
data=data,
files={"image": image_bytes},
timeout=self.api_timeout,
)
try:
response = requests.post(
self.api_url,
data=data,
files={"image": image_bytes},
timeout=self.api_timeout,
)
except requests.exceptions.RequestException:
logger.error("Error calling deepstack API")
return np.zeros((20, 6), np.float32)
response_json = response.json()
detections = np.zeros((20, 6), np.float32)
if response_json.get("predictions") is None:

View File

@ -43,9 +43,9 @@ def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0]
input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + (
parse_preset_input(ffmpeg_input.input_args, 1)
or ffmpeg_input.input_args
or get_ffmpeg_arg_list(ffmpeg_input.input_args)
or parse_preset_input(ffmpeg.input_args, 1)
or ffmpeg.input_args
or get_ffmpeg_arg_list(ffmpeg.input_args)
)
return (
["ffmpeg", "-vn"]

View File

@ -52,7 +52,7 @@ class ExternalEventProcessor:
(
EventTypeEnum.api,
"new",
camera_config,
camera,
{
"id": event_id,
"label": label,

View File

@ -45,6 +45,12 @@ def should_update_state(prev_event: Event, current_event: Event) -> bool:
if prev_event["attributes"] != current_event["attributes"]:
return True
if prev_event["sub_label"] != current_event["sub_label"]:
return True
if len(prev_event["current_zones"]) < len(current_event["current_zones"]):
return True
return False
@ -103,6 +109,16 @@ class EventProcessor(threading.Thread):
self.handle_object_detection(event_type, camera, event_data)
elif source_type == EventTypeEnum.api:
self.timeline_queue.put(
(
camera,
source_type,
event_type,
{},
event_data,
)
)
self.handle_external_detection(event_type, event_data)
# set an end_time on events without an end_time before exiting

View File

@ -5,6 +5,7 @@ import os
from enum import Enum
from typing import Any
from frigate.const import FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI
from frigate.util.services import vainfo_hwaccel
from frigate.version import VERSION
@ -42,6 +43,11 @@ class LibvaGpuSelector:
return ""
FPS_VFR_PARAM = (
"-fps_mode vfr"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
else "-vsync 2"
)
TIMEOUT_PARAM = (
"-timeout"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
@ -57,55 +63,72 @@ _user_agent_args = [
PRESETS_HW_ACCEL_DECODE = {
"preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
"preset-vaapi": f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
"preset-nvidia-h264": "-hwaccel cuda -hwaccel_output_format cuda",
"preset-nvidia-h265": "-hwaccel cuda -hwaccel_output_format cuda",
"preset-nvidia-mjpeg": "-hwaccel cuda -hwaccel_output_format cuda",
FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
"preset-rk-h264": "-c:v h264_rkmpp_decoder",
"preset-rk-h265": "-c:v hevc_rkmpp_decoder",
}
PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[
FFMPEG_HWACCEL_NVIDIA
]
PRESETS_HW_ACCEL_DECODE["preset-nvidia-h265"] = PRESETS_HW_ACCEL_DECODE[
FFMPEG_HWACCEL_NVIDIA
]
PRESETS_HW_ACCEL_DECODE["preset-nvidia-mjpeg"] = PRESETS_HW_ACCEL_DECODE[
FFMPEG_HWACCEL_NVIDIA
]
PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-jetson-h264": "-r {0}", # scaled in decoder
"preset-jetson-h265": "-r {0}", # scaled in decoder
"preset-rk-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rk-h265": "-r {0} -vf fps={0},scale={1}:{2}",
"default": "-r {0} -vf fps={0},scale={1}:{2}",
}
PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[
FFMPEG_HWACCEL_NVIDIA
]
PRESETS_HW_ACCEL_SCALE["preset-nvidia-h265"] = PRESETS_HW_ACCEL_SCALE[
FFMPEG_HWACCEL_NVIDIA
]
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m {1}",
"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
FFMPEG_HWACCEL_VAAPI: "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
"preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-nvidia-h264": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
"preset-nvidia-h265": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
"preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp_encoder -profile high {1}",
"preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}",
"default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}",
}
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[
"preset-nvidia-h264"
] = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[
"preset-nvidia-h265"
] = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -pix_fmt yuv420p {1}",
"preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m -pix_fmt yuv420p {1}",
"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}",
FFMPEG_HWACCEL_VAAPI: "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}",
"preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-nvidia-h264": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v h264_nvenc {1}",
FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v h264_nvenc {1}",
"preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}",
"preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
"preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}",
@ -113,6 +136,14 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}",
"default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}",
}
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[
"preset-nvidia-h264"
] = PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[FFMPEG_HWACCEL_NVIDIA]
# encoding of previews is only done on CPU due to comparable encode times and better quality from libx264
PRESETS_HW_ACCEL_ENCODE_PREVIEW = {
"default": "ffmpeg -hide_banner {0} -c:v libx264 -profile:v baseline -preset:v ultrafast {1}",
}
def parse_preset_hardware_acceleration_decode(
@ -153,6 +184,7 @@ def parse_preset_hardware_acceleration_scale(
class EncodeTypeEnum(str, Enum):
birdseye = "birdseye"
preview = "preview"
timelapse = "timelapse"
@ -162,6 +194,8 @@ def parse_preset_hardware_acceleration_encode(
"""Return the correct scaling preset or default preset if none is set."""
if type == EncodeTypeEnum.birdseye:
arg_map = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE
elif type == EncodeTypeEnum.preview:
arg_map = PRESETS_HW_ACCEL_ENCODE_PREVIEW
elif type == EncodeTypeEnum.timelapse:
arg_map = PRESETS_HW_ACCEL_ENCODE_TIMELAPSE
@ -433,28 +467,3 @@ def parse_preset_output_record(arg: Any) -> list[str]:
return None
return PRESETS_RECORD_OUTPUT.get(arg, None)
PRESETS_RTMP_OUTPUT = {
"preset-rtmp-generic": ["-c", "copy", "-f", "flv"],
"preset-rtmp-mjpeg": ["-c:v", "libx264", "-an", "-f", "flv"],
"preset-rtmp-jpeg": ["-c:v", "libx264", "-an", "-f", "flv"],
"preset-rtmp-ubiquiti": [
"-c:v",
"copy",
"-f",
"flv",
"-ar",
"44100",
"-c:a",
"aac",
],
}
def parse_preset_output_rtmp(arg: Any) -> list[str]:
"""Return the correct preset if in preset format otherwise return None."""
if not isinstance(arg, str):
return None
return PRESETS_RTMP_OUTPUT.get(arg, None)

View File

@ -8,6 +8,7 @@ import re
import subprocess as sp
import time
import traceback
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from functools import reduce
from pathlib import Path
@ -15,6 +16,7 @@ from urllib.parse import unquote
import cv2
import numpy as np
import pandas as pd
import pytz
import requests
from flask import (
@ -43,7 +45,7 @@ from frigate.const import (
RECORD_DIR,
)
from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, Recordings, Regions, Timeline
from frigate.models import Event, Previews, Recordings, Regions, Timeline
from frigate.object_processing import TrackedObject
from frigate.plus import PlusApi
from frigate.ptz.onvif import OnvifController
@ -389,6 +391,17 @@ def set_sub_label(id):
new_sub_label = json.get("subLabel")
new_score = json.get("subLabelScore")
if new_sub_label is None:
return make_response(
jsonify(
{
"success": False,
"message": "A sub label must be supplied",
}
),
400,
)
if new_sub_label and len(new_sub_label) > 100:
return make_response(
jsonify(
@ -414,6 +427,7 @@ def set_sub_label(id):
)
if not event.end_time:
# update tracked object
tracked_obj: TrackedObject = (
current_app.detected_frames_processor.camera_states[
event.camera
@ -423,6 +437,11 @@ def set_sub_label(id):
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
# update timeline items
Timeline.update(
data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
).where(Timeline.source_id == id).execute()
event.sub_label = new_sub_label
if new_score:
@ -611,6 +630,189 @@ def timeline():
return jsonify([t for t in timeline])
@bp.route("/timeline/hourly")
def hourly_timeline():
"""Get hourly summary for timeline."""
cameras = request.args.get("cameras", "all")
labels = request.args.get("labels", "all")
before = request.args.get("before", type=float)
after = request.args.get("after", type=float)
limit = request.args.get("limit", 200)
tz_name = request.args.get("timezone", default="utc", type=str)
_, minute_modifier, _ = get_tz_modifiers(tz_name)
minute_offset = int(minute_modifier.split(" ")[0])
clauses = []
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Timeline.camera << camera_list))
if labels != "all":
label_list = labels.split(",")
clauses.append((Timeline.data["label"] << label_list))
if before:
clauses.append((Timeline.timestamp < before))
if after:
clauses.append((Timeline.timestamp > after))
if len(clauses) == 0:
clauses.append((True))
timeline = (
Timeline.select(
Timeline.camera,
Timeline.timestamp,
Timeline.data,
Timeline.class_type,
Timeline.source_id,
Timeline.source,
)
.where(reduce(operator.and_, clauses))
.order_by(Timeline.timestamp.desc())
.limit(limit)
.dicts()
.iterator()
)
count = 0
start = 0
end = 0
hours: dict[str, list[dict[str, any]]] = {}
for t in timeline:
if count == 0:
start = t["timestamp"]
else:
end = t["timestamp"]
count += 1
hour = (
datetime.fromtimestamp(t["timestamp"]).replace(
minute=0, second=0, microsecond=0
)
+ timedelta(
minutes=minute_offset,
)
).timestamp()
if hour not in hours:
hours[hour] = [t]
else:
hours[hour].insert(0, t)
return jsonify(
{
"start": start,
"end": end,
"count": count,
"hours": hours,
}
)
@bp.route("/<camera_name>/recording/hourly/activity")
def hourly_timeline_activity(camera_name: str):
"""Get hourly summary for timeline."""
if camera_name not in current_app.frigate_config.cameras:
return make_response(
jsonify({"success": False, "message": "Camera not found"}),
404,
)
before = request.args.get("before", type=float, default=datetime.now())
after = request.args.get(
"after", type=float, default=datetime.now() - timedelta(hours=1)
)
tz_name = request.args.get("timezone", default="utc", type=str)
_, minute_modifier, _ = get_tz_modifiers(tz_name)
minute_offset = int(minute_modifier.split(" ")[0])
all_recordings: list[Recordings] = (
Recordings.select(
Recordings.start_time,
Recordings.duration,
Recordings.objects,
Recordings.motion,
)
.where(Recordings.camera == camera_name)
.where(Recordings.motion > 0)
.where((Recordings.start_time > after) & (Recordings.end_time < before))
.order_by(Recordings.start_time.asc())
.iterator()
)
# data format is ex:
# {hour_timestamp: [[date, count, hasObjects], ...]}
hours: dict[int, list[dict[str, any]]] = defaultdict(list)
key = datetime.fromtimestamp(after).replace(second=0, microsecond=0) + timedelta(
minutes=minute_offset
)
check = (key + timedelta(hours=1)).timestamp()
# set initial start so data is representative of full hour
hours[int(key.timestamp())].append(
[
key.timestamp(),
0,
False,
]
)
for recording in all_recordings:
if recording.start_time > check:
hours[int(key.timestamp())].append(
[
(key + timedelta(minutes=59, seconds=59)).timestamp(),
0,
False,
]
)
key = key + timedelta(hours=1)
check = (key + timedelta(hours=1)).timestamp()
hours[int(key.timestamp())].append(
[
key.timestamp(),
0,
False,
]
)
data_type = recording.objects > 0
count = recording.motion + recording.objects
hours[int(key.timestamp())].append(
[
recording.start_time + (recording.duration / 2),
0 if count == 0 else np.log2(count),
data_type,
]
)
# resample data using pandas to get activity on minute to minute basis
for key, data in hours.items():
df = pd.DataFrame(data, columns=["date", "count", "hasObjects"])
# set date as datetime index
df["date"] = pd.to_datetime(df["date"], unit="s")
df.set_index(["date"], inplace=True)
# normalize data
df = df.resample("T").mean().fillna(0)
# change types for output
df.index = df.index.astype(int) // (10**9)
df["count"] = df["count"].astype(int)
df["hasObjects"] = df["hasObjects"].astype(bool)
hours[key] = df.reset_index().to_dict("records")
return jsonify(hours)
@bp.route("/<camera_name>/<label>/best.jpg")
@bp.route("/<camera_name>/<label>/thumbnail.jpg")
def label_thumbnail(camera_name, label):
@ -1674,6 +1876,7 @@ def recordings(camera_name):
Recordings.segment_size,
Recordings.motion,
Recordings.objects,
Recordings.duration,
)
.where(
Recordings.camera == camera_name,
@ -1845,7 +2048,6 @@ def vod_hour_no_timezone(year_month, day, hour, camera_name):
)
# TODO make this nicer when vod module is removed
@bp.route("/vod/<year_month>/<day>/<hour>/<camera_name>/<tz_name>")
def vod_hour(year_month, day, hour, camera_name, tz_name):
parts = year_month.split("-")
@ -1860,6 +2062,110 @@ def vod_hour(year_month, day, hour, camera_name, tz_name):
return vod_ts(camera_name, start_ts, end_ts)
@bp.route("/preview/<camera_name>/start/<int:start_ts>/end/<int:end_ts>")
@bp.route("/preview/<camera_name>/start/<float:start_ts>/end/<float:end_ts>")
def preview_ts(camera_name, start_ts, end_ts):
"""Get all mp4 previews relevant for time period."""
if camera_name != "all":
camera_clause = Previews.camera == camera_name
else:
camera_clause = True
previews = (
Previews.select(
Previews.camera,
Previews.path,
Previews.duration,
Previews.start_time,
Previews.end_time,
)
.where(
Previews.start_time.between(start_ts, end_ts)
| Previews.end_time.between(start_ts, end_ts)
| ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
)
.where(camera_clause)
.order_by(Previews.start_time.asc())
.dicts()
.iterator()
)
clips = []
preview: Previews
for preview in previews:
clips.append(
{
"camera": preview["camera"],
"src": preview["path"].replace("/media/frigate", ""),
"type": "video/mp4",
"start": preview["start_time"],
"end": preview["end_time"],
}
)
if not clips:
return make_response(
jsonify(
{
"success": False,
"message": "No previews found.",
}
),
404,
)
return make_response(jsonify(clips), 200)
@bp.route("/preview/<year_month>/<day>/<hour>/<camera_name>/<tz_name>")
def preview_hour(year_month, day, hour, camera_name, tz_name):
parts = year_month.split("-")
start_date = (
datetime(int(parts[0]), int(parts[1]), int(day), int(hour), tzinfo=timezone.utc)
- datetime.now(pytz.timezone(tz_name.replace(",", "/"))).utcoffset()
)
end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
start_ts = start_date.timestamp()
end_ts = end_date.timestamp()
return preview_ts(camera_name, start_ts, end_ts)
@bp.route("/preview/<camera_name>/<frame_time>/thumbnail.jpg")
def preview_thumbnail(camera_name, frame_time):
"""Get a thumbnail from the cached preview jpgs."""
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera_name}"
file_check = f"{file_start}-{frame_time}.jpg"
selected_preview = None
for file in os.listdir(preview_dir):
if file.startswith(file_start):
if file < file_check:
selected_preview = file
break
if selected_preview is None:
return make_response(
jsonify(
{
"success": False,
"message": "Could not find valid preview jpg.",
}
),
404,
)
with open(os.path.join(preview_dir, selected_preview), "rb") as image_file:
jpg_bytes = image_file.read()
response = make_response(jpg_bytes)
response.headers["Content-Type"] = "image/jpeg"
response.headers["Cache-Control"] = "private, max-age=31536000"
return response
@bp.route("/vod/event/<id>")
def vod_event(id):
try:

View File

@ -76,6 +76,15 @@ class Recordings(Model): # type: ignore[misc]
segment_size = FloatField(default=0) # this should be stored as MB
class Previews(Model): # type: ignore[misc]
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
path = CharField(unique=True)
start_time = DateTimeField()
end_time = DateTimeField()
duration = FloatField()
# Used for temporary table in record/cleanup.py
class RecordingsToDelete(Model): # type: ignore[misc]
id = CharField(null=False, primary_key=False, max_length=30)

View File

@ -19,6 +19,7 @@ from frigate.config import (
MqttConfig,
RecordConfig,
SnapshotsConfig,
ZoomingModeEnum,
)
from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum
@ -195,7 +196,7 @@ class TrackedObject:
self.zone_presence[name] = zone_score + 1
# an object is only considered present in a zone if it has a zone inertia of 3+
if zone_score >= zone.inertia:
if self.zone_presence[name] >= zone.inertia:
current_zones.append(name)
if name not in self.entered_zones:
@ -512,6 +513,39 @@ class CameraState:
thickness = 5
color = self.config.model.colormap[obj["label"]]
# debug autotracking zooming - show the zoom factor box
if (
self.camera_config.onvif.autotracking.zooming
!= ZoomingModeEnum.disabled
):
max_target_box = self.ptz_autotracker_thread.ptz_autotracker.tracked_object_metrics[
self.name
]["max_target_box"]
side_length = max_target_box * (
max(
self.camera_config.detect.width,
self.camera_config.detect.height,
)
)
centroid_x = (obj["box"][0] + obj["box"][2]) // 2
centroid_y = (obj["box"][1] + obj["box"][3]) // 2
top_left = (
int(centroid_x - side_length // 2),
int(centroid_y - side_length // 2),
)
bottom_right = (
int(centroid_x + side_length // 2),
int(centroid_y + side_length // 2),
)
cv2.rectangle(
frame_copy,
top_left,
bottom_right,
(255, 255, 0),
2,
)
# draw the bounding boxes on the frame
box = obj["box"]
text = (

View File

@ -1,3 +1,5 @@
"""Handle outputting birdseye frames via jsmpeg and go2rtc."""
import datetime
import glob
import logging
@ -5,23 +7,13 @@ import math
import multiprocessing as mp
import os
import queue
import signal
import subprocess as sp
import threading
import traceback
from wsgiref.simple_server import make_server
import cv2
import numpy as np
from setproctitle import setproctitle
from ws4py.server.wsgirefserver import (
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
WSGIServer,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.ws import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.types import CameraMetricsTypes
@ -672,66 +664,20 @@ class BirdsEyeFrameManager:
return False
def output_frames(
config: FrigateConfig,
video_output_queue,
camera_metrics: dict[str, CameraMetricsTypes],
):
threading.current_thread().name = "output"
setproctitle("frigate.output")
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
previous_frames = {}
# start a websocket server on 8082
WebSocketWSGIHandler.http_version = "1.1"
websocket_server = make_server(
"127.0.0.1",
8082,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=WebSocket),
)
websocket_server.initialize_websockets_manager()
websocket_thread = threading.Thread(target=websocket_server.serve_forever)
inputs: dict[str, queue.Queue] = {}
converters = {}
broadcasters = {}
for camera, cam_config in config.cameras.items():
inputs[camera] = queue.Queue(maxsize=cam_config.detect.fps)
width = int(
cam_config.live.height
* (cam_config.frame_shape[1] / cam_config.frame_shape[0])
)
converters[camera] = FFMpegConverter(
camera,
inputs[camera],
stop_event,
cam_config.frame_shape[1],
cam_config.frame_shape[0],
width,
cam_config.live.height,
cam_config.live.quality,
)
broadcasters[camera] = BroadcastThread(
camera, converters[camera], websocket_server, stop_event
)
if config.birdseye.enabled:
inputs["birdseye"] = queue.Queue(maxsize=10)
converters["birdseye"] = FFMpegConverter(
class Birdseye:
def __init__(
self,
config: FrigateConfig,
frame_manager: SharedMemoryFrameManager,
camera_metrics: dict[str, CameraMetricsTypes],
stop_event: mp.Event,
websocket_server,
) -> None:
self.config = config
self.input = queue.Queue(maxsize=10)
self.converter = FFMpegConverter(
"birdseye",
inputs["birdseye"],
self.input,
stop_event,
config.birdseye.width,
config.birdseye.height,
@@ -740,107 +686,48 @@ def output_frames(
config.birdseye.quality,
config.birdseye.restream,
)
broadcasters["birdseye"] = BroadcastThread(
"birdseye",
converters["birdseye"],
websocket_server,
stop_event,
self.broadcaster = BroadcastThread(
"birdseye", self.converter, websocket_server, stop_event
)
self.birdseye_manager = BirdsEyeFrameManager(
config, frame_manager, stop_event, camera_metrics
)
websocket_thread.start()
if config.birdseye.restream:
self.birdseye_buffer = frame_manager.create(
"birdseye",
self.birdseye_manager.yuv_shape[0] * self.birdseye_manager.yuv_shape[1],
)
for t in converters.values():
t.start()
self.converter.start()
self.broadcaster.start()
for t in broadcasters.values():
t.start()
birdseye_manager = BirdsEyeFrameManager(
config, frame_manager, stop_event, camera_metrics
)
if config.birdseye.restream:
birdseye_buffer = frame_manager.create(
"birdseye",
birdseye_manager.yuv_shape[0] * birdseye_manager.yuv_shape[1],
)
while not stop_event.is_set():
try:
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 1)
except queue.Empty:
continue
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
# send camera frame to ffmpeg process if websockets are connected
if any(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
def write_data(
self,
camera: str,
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
frame,
) -> None:
if self.birdseye_manager.update(
camera,
len([o for o in current_tracked_objects if not o["stationary"]]),
len(motion_boxes),
frame_time,
frame,
):
# write to the converter for the camera if clients are listening to the specific camera
frame_bytes = self.birdseye_manager.frame.tobytes()
if self.config.birdseye.restream:
self.birdseye_buffer[:] = frame_bytes
try:
inputs[camera].put_nowait(frame.tobytes())
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
if config.birdseye.enabled and (
config.birdseye.restream
or any(
ws.environ["PATH_INFO"].endswith("birdseye")
for ws in websocket_server.manager
)
):
if birdseye_manager.update(
camera,
len([o for o in current_tracked_objects if not o["stationary"]]),
len(motion_boxes),
frame_time,
frame,
):
frame_bytes = birdseye_manager.frame.tobytes()
if config.birdseye.restream:
birdseye_buffer[:] = frame_bytes
try:
inputs["birdseye"].put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
if camera in previous_frames:
frame_manager.delete(f"{camera}{previous_frames[camera]}")
previous_frames[camera] = frame_time
while not video_output_queue.empty():
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 10)
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
frame_manager.delete(frame_id)
for b in broadcasters.values():
b.join()
websocket_server.manager.close_all()
websocket_server.manager.stop()
websocket_server.manager.join()
websocket_server.shutdown()
websocket_thread.join()
logger.info("exiting output process...")
def stop(self) -> None:
self.converter.join()
self.broadcaster.join()

165
frigate/output/camera.py Normal file
View File

@@ -0,0 +1,165 @@
"""Handle outputting individual cameras via jsmpeg."""
import logging
import multiprocessing as mp
import queue
import subprocess as sp
import threading
from frigate.config import CameraConfig
logger = logging.getLogger(__name__)
class FFMpegConverter(threading.Thread):
def __init__(
self,
camera: str,
input_queue: queue.Queue,
stop_event: mp.Event,
in_width: int,
in_height: int,
out_width: int,
out_height: int,
quality: int,
):
threading.Thread.__init__(self)
self.name = f"{camera}_output_converter"
self.camera = camera
self.input_queue = input_queue
self.stop_event = stop_event
ffmpeg_cmd = [
"ffmpeg",
"-f",
"rawvideo",
"-pix_fmt",
"yuv420p",
"-video_size",
f"{in_width}x{in_height}",
"-i",
"pipe:",
"-f",
"mpegts",
"-s",
f"{out_width}x{out_height}",
"-codec:v",
"mpeg1video",
"-q",
f"{quality}",
"-bf",
"0",
"pipe:",
]
self.process = sp.Popen(
ffmpeg_cmd,
stdout=sp.PIPE,
stderr=sp.DEVNULL,
stdin=sp.PIPE,
start_new_session=True,
)
def __write(self, b) -> None:
self.process.stdin.write(b)
def read(self, length):
try:
return self.process.stdout.read1(length)
except ValueError:
return False
def exit(self):
self.process.terminate()
try:
self.process.communicate(timeout=30)
except sp.TimeoutExpired:
self.process.kill()
self.process.communicate()
def run(self) -> None:
while not self.stop_event.is_set():
try:
frame = self.input_queue.get(True, timeout=1)
self.__write(frame)
except queue.Empty:
pass
self.exit()
class BroadcastThread(threading.Thread):
def __init__(
self,
camera: str,
converter: FFMpegConverter,
websocket_server,
stop_event: mp.Event,
):
super(BroadcastThread, self).__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
def run(self):
while not self.stop_event.is_set():
buf = self.converter.read(65536)
if buf:
manager = self.websocket_server.manager
with manager.lock:
websockets = manager.websockets.copy()
ws_iter = iter(websockets.values())
for ws in ws_iter:
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
):
try:
ws.send(buf, binary=True)
except ValueError:
pass
except (BrokenPipeError, ConnectionResetError) as e:
logger.debug(f"Websocket unexpectedly closed {e}")
elif self.converter.process.poll() is not None:
break
class JsmpegCamera:
def __init__(
self, config: CameraConfig, stop_event: mp.Event, websocket_server
) -> None:
self.config = config
self.input = queue.Queue(maxsize=config.detect.fps)
width = int(
config.live.height * (config.frame_shape[1] / config.frame_shape[0])
)
self.converter = FFMpegConverter(
config.name,
self.input,
stop_event,
config.frame_shape[1],
config.frame_shape[0],
width,
config.live.height,
config.live.quality,
)
self.broadcaster = BroadcastThread(
config.name, self.converter, websocket_server, stop_event
)
self.converter.start()
self.broadcaster.start()
def write_frame(self, frame_bytes) -> None:
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def stop(self) -> None:
self.converter.join()
self.broadcaster.join()
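As an aside, the FFMpegConverter above amounts to a single ffmpeg invocation reading raw YUV frames on stdin and emitting MPEG1-in-TS on stdout for jsmpeg. A minimal sketch, assuming a 1280x720 source scaled to a 640x360 live view at quality 8 (all values illustrative), renders the command it builds:

# Assumed values: 1280x720 in, 640x360 out, quality 8.
in_w, in_h, out_w, out_h, quality = 1280, 720, 640, 360, 8
ffmpeg_cmd = [
    "ffmpeg",
    "-f", "rawvideo", "-pix_fmt", "yuv420p",
    "-video_size", f"{in_w}x{in_h}",
    "-i", "pipe:",
    "-f", "mpegts",
    "-s", f"{out_w}x{out_h}",
    "-codec:v", "mpeg1video",
    "-q", f"{quality}",
    "-bf", "0",
    "pipe:",
]
print(" ".join(ffmpeg_cmd))
# ffmpeg -f rawvideo -pix_fmt yuv420p -video_size 1280x720 -i pipe: -f mpegts
#   -s 640x360 -codec:v mpeg1video -q 8 -bf 0 pipe: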

157
frigate/output/output.py Normal file
View File

@@ -0,0 +1,157 @@
"""Handle outputting raw frigate frames"""
import logging
import multiprocessing as mp
import queue
import signal
import threading
from typing import Optional
from wsgiref.simple_server import make_server
from setproctitle import setproctitle
from ws4py.server.wsgirefserver import (
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
WSGIServer,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.ws import WebSocket
from frigate.config import FrigateConfig
from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder
from frigate.types import CameraMetricsTypes
from frigate.util.image import SharedMemoryFrameManager
logger = logging.getLogger(__name__)
def output_frames(
config: FrigateConfig,
video_output_queue: mp.Queue,
inter_process_queue: mp.Queue,
camera_metrics: dict[str, CameraMetricsTypes],
):
threading.current_thread().name = "output"
setproctitle("frigate.output")
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
previous_frames = {}
# start a websocket server on 8082
WebSocketWSGIHandler.http_version = "1.1"
websocket_server = make_server(
"127.0.0.1",
8082,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=WebSocket),
)
websocket_server.initialize_websockets_manager()
websocket_thread = threading.Thread(target=websocket_server.serve_forever)
jsmpeg_cameras: dict[str, JsmpegCamera] = {}
birdseye: Optional[Birdseye] = None
preview_recorders: dict[str, PreviewRecorder] = {}
for camera, cam_config in config.cameras.items():
if not cam_config.enabled:
continue
jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server)
preview_recorders[camera] = PreviewRecorder(cam_config, inter_process_queue)
if config.birdseye.enabled:
birdseye = Birdseye(
config, frame_manager, camera_metrics, stop_event, websocket_server
)
websocket_thread.start()
while not stop_event.is_set():
try:
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 1)
except queue.Empty:
continue
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
# send camera frame to ffmpeg process if websockets are connected
if any(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
):
# write to the converter for the camera if clients are listening to the specific camera
jsmpeg_cameras[camera].write_frame(frame.tobytes())
# send output data to birdseye if websocket is connected or restreaming
if config.birdseye.enabled and (
config.birdseye.restream
or any(
ws.environ["PATH_INFO"].endswith("birdseye")
for ws in websocket_server.manager
)
):
birdseye.write_data(
camera,
current_tracked_objects,
motion_boxes,
frame_time,
frame,
)
# send frames for low fps recording
preview_recorders[camera].write_data(
current_tracked_objects, motion_boxes, frame_time, frame
)
# delete frames after they have been used for output
if camera in previous_frames:
frame_manager.delete(f"{camera}{previous_frames[camera]}")
previous_frames[camera] = frame_time
while not video_output_queue.empty():
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 10)
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
frame_manager.delete(frame_id)
for jsmpeg in jsmpeg_cameras.values():
jsmpeg.stop()
for preview in preview_recorders.values():
preview.stop()
if birdseye is not None:
birdseye.stop()
websocket_server.manager.close_all()
websocket_server.manager.stop()
websocket_server.manager.join()
websocket_server.shutdown()
websocket_thread.join()
logger.info("exiting output process...")

264
frigate/output/preview.py Normal file
View File

@@ -0,0 +1,264 @@
"""Handle outputting low res / fps preview segments from decoded frames."""
import datetime
import logging
import multiprocessing as mp
import os
import shutil
import subprocess as sp
import threading
from pathlib import Path
import cv2
import numpy as np
from frigate.config import CameraConfig, RecordQualityEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, INSERT_PREVIEW
from frigate.ffmpeg_presets import (
FPS_VFR_PARAM,
EncodeTypeEnum,
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Previews
from frigate.util.image import copy_yuv_to_position, get_yuv_crop
logger = logging.getLogger(__name__)
FOLDER_PREVIEW_FRAMES = "preview_frames"
PREVIEW_OUTPUT_FPS = 1
PREVIEW_SEGMENT_DURATION = 3600 # one hour
# important to have lower keyframe to maintain scrubbing performance
PREVIEW_KEYFRAME_INTERVAL = 60
PREVIEW_BIT_RATES = {
RecordQualityEnum.very_low: 4096,
RecordQualityEnum.low: 6144,
RecordQualityEnum.medium: 8192,
RecordQualityEnum.high: 12288,
RecordQualityEnum.very_high: 16384,
}
def get_cache_image_name(camera: str, frame_time: float) -> str:
"""Get the image name in cache."""
return os.path.join(
CACHE_DIR,
f"{FOLDER_PREVIEW_FRAMES}/preview_{camera}-{frame_time}.jpg",
)
class FFMpegConverter(threading.Thread):
"""Convert a list of jpg frames into a vfr mp4."""
def __init__(
self,
config: CameraConfig,
frame_times: list[float],
inter_process_queue: mp.Queue,
):
threading.Thread.__init__(self)
self.name = f"{config.name}_preview_converter"
self.config = config
self.frame_times = frame_times
self.inter_process_queue = inter_process_queue
self.path = os.path.join(
CLIPS_DIR,
f"previews/{self.config.name}/{self.frame_times[0]}-{self.frame_times[-1]}.mp4",
)
# write a PREVIEW at fps and 1 key frame per clip
self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
config.ffmpeg.hwaccel_args,
input="-f concat -y -protocol_whitelist pipe,file -safe 0 -i /dev/stdin",
output=f"-g {PREVIEW_KEYFRAME_INTERVAL} -fpsmax {PREVIEW_OUTPUT_FPS} -bf 0 -b:v {PREVIEW_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
type=EncodeTypeEnum.preview,
)
def run(self) -> None:
# generate input list
item_count = len(self.frame_times)
playlist = []
for t_idx in range(0, item_count):
if t_idx == item_count - 1:
# last frame does not get a duration
playlist.append(
f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'"
)
continue
playlist.append(
f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'"
)
playlist.append(
f"duration {self.frame_times[t_idx + 1] - self.frame_times[t_idx]}"
)
p = sp.run(
self.ffmpeg_cmd.split(" "),
input="\n".join(playlist),
encoding="ascii",
capture_output=True,
)
start = self.frame_times[0]
end = self.frame_times[-1]
if p.returncode == 0:
logger.debug("successfully saved preview")
self.inter_process_queue.put_nowait(
(
INSERT_PREVIEW,
{
Previews.id: f"{self.config.name}_{end}",
Previews.camera: self.config.name,
Previews.path: self.path,
Previews.start_time: start,
Previews.end_time: end,
Previews.duration: end - start,
},
)
)
else:
logger.error(f"Error saving preview for {self.config.name} :: {p.stderr}")
# unlink files from cache
# don't delete last frame as it will be used as first frame in next segment
for t in self.frame_times[0:-1]:
Path(get_cache_image_name(self.config.name, t)).unlink(missing_ok=True)
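For reference, the playlist fed to ffmpeg's concat demuxer above looks like the following, assuming CACHE_DIR resolves to /tmp/cache, a camera named front, and frame times 100.0, 102.5 and 106.0 (all illustrative). Note that the final frame carries no duration line:

file '/tmp/cache/preview_frames/preview_front-100.0.jpg'
duration 2.5
file '/tmp/cache/preview_frames/preview_front-102.5.jpg'
duration 3.5
file '/tmp/cache/preview_frames/preview_front-106.0.jpg'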
class PreviewRecorder:
def __init__(self, config: CameraConfig, inter_process_queue: mp.Queue) -> None:
self.config = config
self.inter_process_queue = inter_process_queue
self.start_time = 0
self.last_output_time = 0
self.output_frames = []
self.out_height = 160
self.out_width = (
int((config.detect.width / config.detect.height) * self.out_height) // 4 * 4
)
y, u1, u2, v1, v2 = get_yuv_crop(
self.config.frame_shape_yuv,
(
0,
0,
self.config.frame_shape[1],
self.config.frame_shape[0],
),
)
self.channel_dims = {
"y": y,
"u1": u1,
"u2": u2,
"v1": v1,
"v2": v2,
}
# end segment at end of hour
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
Path(os.path.join(CACHE_DIR, "preview_frames")).mkdir(exist_ok=True)
Path(os.path.join(CLIPS_DIR, f"previews/{config.name}")).mkdir(
parents=True, exist_ok=True
)
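A quick worked example of the rollover timestamp above (clock time is illustrative): a recorder constructed at 14:23:05 sets segment_end to 15:00:00, so every preview segment closes at the top of the hour regardless of when it started.

import datetime

now = datetime.datetime(2024, 1, 31, 14, 23, 5)  # assumed current time
segment_end = (now + datetime.timedelta(hours=1)).replace(
    minute=0, second=0, microsecond=0
)
print(segment_end)  # 2024-01-31 15:00:00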
def should_write_frame(
self,
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
) -> bool:
"""Decide if this frame should be added to PREVIEW."""
# limit output to 1 fps
if (frame_time - self.last_output_time) < 1 / PREVIEW_OUTPUT_FPS:
return False
# send frame if a non-stationary object is in a zone
if any(
(len(o["current_zones"]) > 0 and not o["stationary"])
for o in current_tracked_objects
):
self.last_output_time = frame_time
return True
if len(motion_boxes) > 0:
self.last_output_time = frame_time
return True
return False
def write_frame_to_cache(self, frame_time: float, frame) -> None:
# resize yuv frame
small_frame = np.zeros((self.out_height * 3 // 2, self.out_width), np.uint8)
copy_yuv_to_position(
small_frame,
(0, 0),
(self.out_height, self.out_width),
frame,
self.channel_dims,
cv2.INTER_AREA,
)
small_frame = cv2.cvtColor(
small_frame,
cv2.COLOR_YUV2BGR_I420,
)
_, jpg = cv2.imencode(".jpg", small_frame)
with open(
get_cache_image_name(self.config.name, frame_time),
"wb",
) as j:
j.write(jpg.tobytes())
def write_data(
self,
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
frame,
) -> None:
# always write the first frame
if self.start_time == 0:
self.start_time = frame_time
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
return
# check if PREVIEW clip should be generated and cached frames reset
if frame_time >= self.segment_end:
# save last frame to ensure consistent duration
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
FFMpegConverter(
self.config,
self.output_frames,
self.inter_process_queue,
).start()
# reset frame cache
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
self.start_time = frame_time
self.last_output_time = frame_time
self.output_frames = []
# include first frame to ensure consistent duration
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
elif self.should_write_frame(current_tracked_objects, motion_boxes, frame_time):
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
def stop(self) -> None:
try:
shutil.rmtree(os.path.join(CACHE_DIR, FOLDER_PREVIEW_FRAMES))
except FileNotFoundError:
pass

View File

@@ -527,16 +527,28 @@ class PtzAutoTracker:
return np.dot(self.move_coefficients[camera], input_data)
def _predict_area_after_time(self, camera, time):
return np.dot(
self.tracked_object_metrics[camera]["area_coefficients"],
[self.tracked_object_history[camera][-1]["frame_time"] + time],
)
def _calculate_tracked_object_metrics(self, camera, obj):
def remove_outliers(data):
Q1 = np.percentile(data, 25)
Q3 = np.percentile(data, 75)
areas = [item["area"] for item in data]
Q1 = np.percentile(areas, 25)
Q3 = np.percentile(areas, 75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
filtered_data = [x for x in data if lower_bound <= x <= upper_bound]
removed_values = [x for x in data if x not in filtered_data]
filtered_data = [
item for item in data if lower_bound <= item["area"] <= upper_bound
]
# Find and log the removed values
removed_values = [item for item in data if item not in filtered_data]
logger.debug(f"{camera}: Removed area outliers: {removed_values}")
return filtered_data
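A small numeric sketch of the IQR filter above, with made-up areas (the dict wrapping is dropped for brevity):

import numpy as np

# Hypothetical squared-side areas from a tracked object's history.
areas = [9000, 9500, 10000, 10500, 11000, 40000]
q1, q3 = np.percentile(areas, 25), np.percentile(areas, 75)  # 9625.0, 10875.0
iqr = q3 - q1                                                # 1250.0
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr                # 7750.0, 12750.0
kept = [a for a in areas if lower <= a <= upper]
# The 40000 spike (e.g. a bad detection) falls outside the bounds and is dropped.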
@@ -548,18 +560,43 @@ class PtzAutoTracker:
# Extract areas and calculate weighted average
# grab the largest dimension of the bounding box and create a square from that
areas = [
max(obj["box"][2] - obj["box"][0], obj["box"][3] - obj["box"][1]) ** 2
{
"frame_time": obj["frame_time"],
"box": obj["box"],
"area": max(
obj["box"][2] - obj["box"][0], obj["box"][3] - obj["box"][1]
)
** 2,
}
for obj in self.tracked_object_history[camera]
]
filtered_areas = (
remove_outliers(areas)
if len(areas) >= self.config.cameras[camera].detect.fps / 2
else areas
)
filtered_areas = remove_outliers(areas) if len(areas) >= 2 else areas
# Filter entries that are not touching the frame edge
filtered_areas_not_touching_edge = [
entry
for entry in filtered_areas
if self._touching_frame_edges(camera, entry["box"]) == 0
]
# Calculate regression for area change predictions
if len(filtered_areas_not_touching_edge):
X = np.array(
[item["frame_time"] for item in filtered_areas_not_touching_edge]
)
y = np.array([item["area"] for item in filtered_areas_not_touching_edge])
self.tracked_object_metrics[camera]["area_coefficients"] = np.linalg.lstsq(
X.reshape(-1, 1), y, rcond=None
)[0]
else:
self.tracked_object_metrics[camera]["area_coefficients"] = np.array([0])
weights = np.arange(1, len(filtered_areas) + 1)
weighted_area = np.average(filtered_areas, weights=weights)
weighted_area = np.average(
[item["area"] for item in filtered_areas], weights=weights
)
self.tracked_object_metrics[camera]["target_box"] = (
weighted_area / (camera_width * camera_height)
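The regression above fits area against the raw frame timestamp with no intercept term, so the single coefficient in area_coefficients models area as proportional to absolute time; _predict_area_after_time then evaluates that line a short interval past the last sample. A minimal standalone sketch with assumed samples:

import numpy as np

# Assumed (frame_time, area) history for one object approaching the camera.
times = np.array([100.0, 100.2, 100.4, 100.6])
areas = np.array([10000.0, 10400.0, 10800.0, 11200.0])

# Same no-intercept least-squares fit as above: area ~= coeff * frame_time.
coeffs = np.linalg.lstsq(times.reshape(-1, 1), areas, rcond=None)[0]

# Mirrors _predict_area_after_time(): estimate the area 0.5s past the last frame.
predicted_area = np.dot(coeffs, [times[-1] + 0.5])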
@@ -681,26 +718,27 @@ class PtzAutoTracker:
self._calculate_move_coefficients(camera)
def _enqueue_move(self, camera, frame_time, pan, tilt, zoom):
def split_value(value):
def split_value(value, suppress_diff=True):
clipped = np.clip(value, -1, 1)
return clipped, value - clipped
# don't make small movements
if -0.05 < clipped < 0.05 and suppress_diff:
diff = 0.0
else:
diff = value - clipped
return clipped, diff
if (
frame_time > self.ptz_metrics[camera]["ptz_start_time"].value
and frame_time > self.ptz_metrics[camera]["ptz_stop_time"].value
and not self.move_queue_locks[camera].locked()
):
# don't make small movements
if abs(pan) < 0.02:
pan = 0
if abs(tilt) < 0.02:
tilt = 0
# split up any large moves caused by velocity estimated movements
while pan != 0 or tilt != 0 or zoom != 0:
pan, pan_excess = split_value(pan)
tilt, tilt_excess = split_value(tilt)
zoom, zoom_excess = split_value(zoom)
zoom, zoom_excess = split_value(zoom, False)
logger.debug(
f"{camera}: Enqueue movement for frame time: {frame_time} pan: {pan}, tilt: {tilt}, zoom: {zoom}"
@@ -712,6 +750,21 @@ class PtzAutoTracker:
tilt = tilt_excess
zoom = zoom_excess
def _touching_frame_edges(self, camera, box):
camera_config = self.config.cameras[camera]
camera_width = camera_config.frame_shape[1]
camera_height = camera_config.frame_shape[0]
bb_left, bb_top, bb_right, bb_bottom = box
edge_threshold = AUTOTRACKING_ZOOM_EDGE_THRESHOLD
return int(
(bb_left < edge_threshold * camera_width)
+ (bb_right > (1 - edge_threshold) * camera_width)
+ (bb_top < edge_threshold * camera_height)
+ (bb_bottom > (1 - edge_threshold) * camera_height)
)
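A worked count for the helper above, assuming AUTOTRACKING_ZOOM_EDGE_THRESHOLD is 0.15 and a 1280x720 frame (both illustrative; the real constant lives in Frigate's autotracking constants):

edge_threshold = 0.15  # assumed value of AUTOTRACKING_ZOOM_EDGE_THRESHOLD
camera_width, camera_height = 1280, 720
bb_left, bb_top, bb_right, bb_bottom = 20, 300, 400, 715  # hypothetical box

touching = int(
    (bb_left < edge_threshold * camera_width)             # 20 < 192   -> True
    + (bb_right > (1 - edge_threshold) * camera_width)    # 400 > 1088 -> False
    + (bb_top < edge_threshold * camera_height)           # 300 < 108  -> False
    + (bb_bottom > (1 - edge_threshold) * camera_height)  # 715 > 612  -> True
)
# touching == 2: the box presses against the left and bottom edges.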
def _get_valid_velocity(self, camera, obj):
# returns a tuple and euclidean distance if the estimated velocity is valid
# if invalid, returns [0, 0] and -1
@@ -722,7 +775,9 @@ class PtzAutoTracker:
# estimate_velocity is a numpy array of bbox top,left and bottom,right velocities
velocities = obj.obj_data["estimate_velocity"]
logger.debug(f"{camera}: Velocities from norfair: {velocities}")
logger.debug(
f"{camera}: Velocity (Norfair): {tuple(np.round(velocities).flatten().astype(int))}"
)
# if we are close enough to zero, return right away
if np.all(np.round(velocities) == 0):
@@ -827,7 +882,7 @@ class PtzAutoTracker:
return distance_threshold
def _should_zoom_in(self, camera, obj, box, debug_zooming=False):
def _should_zoom_in(self, camera, obj, box, predicted_time, debug_zooming=False):
# returns True if we should zoom in, False if we should zoom out, None to do nothing
camera_config = self.config.cameras[camera]
camera_width = camera_config.frame_shape[1]
@@ -838,9 +893,6 @@ class PtzAutoTracker:
bb_left, bb_top, bb_right, bb_bottom = box
# TODO: Take into account the area changing when an object is moving out of frame
edge_threshold = AUTOTRACKING_ZOOM_EDGE_THRESHOLD
# calculate a velocity threshold based on movement coefficients if available
if camera_config.onvif.autotracking.movement_weights:
predicted_movement_time = self._predict_movement_time(camera, 1, 1)
@@ -851,12 +903,8 @@ class PtzAutoTracker:
velocity_threshold_x = camera_width * 0.02
velocity_threshold_y = camera_height * 0.02
touching_frame_edges = int(
(bb_left < edge_threshold * camera_width)
+ (bb_right > (1 - edge_threshold) * camera_width)
+ (bb_top < edge_threshold * camera_height)
+ (bb_bottom > (1 - edge_threshold) * camera_height)
)
# return a count of the number of frame edges the bounding box is touching
touching_frame_edges = self._touching_frame_edges(camera, box)
# make sure object is centered in the frame
below_distance_threshold = self.tracked_object_metrics[camera][
@@ -873,19 +921,28 @@ class PtzAutoTracker:
< np.tile([velocity_threshold_x, velocity_threshold_y], 2)
) or np.all(average_velocity == 0)
if not predicted_time:
calculated_target_box = self.tracked_object_metrics[camera]["target_box"]
else:
calculated_target_box = self.tracked_object_metrics[camera][
"target_box"
] + self._predict_area_after_time(camera, predicted_time) / (
camera_width * camera_height
)
below_area_threshold = (
self.tracked_object_metrics[camera]["target_box"]
calculated_target_box
< self.tracked_object_metrics[camera]["max_target_box"]
)
# introduce some hysteresis to prevent a yo-yo zooming effect
zoom_out_hysteresis = (
self.tracked_object_metrics[camera]["target_box"]
calculated_target_box
> self.tracked_object_metrics[camera]["max_target_box"]
* AUTOTRACKING_ZOOM_OUT_HYSTERESIS
)
zoom_in_hysteresis = (
self.tracked_object_metrics[camera]["target_box"]
calculated_target_box
< self.tracked_object_metrics[camera]["max_target_box"]
* AUTOTRACKING_ZOOM_IN_HYSTERESIS
)
@@ -902,13 +959,13 @@ class PtzAutoTracker:
# debug zooming
if debug_zooming:
logger.debug(
f"{camera}: Zoom test: touching edges: count: {touching_frame_edges} left: {bb_left < edge_threshold * camera_width}, right: {bb_right > (1 - edge_threshold) * camera_width}, top: {bb_top < edge_threshold * camera_height}, bottom: {bb_bottom > (1 - edge_threshold) * camera_height}"
f"{camera}: Zoom test: touching edges: count: {touching_frame_edges} left: {bb_left < AUTOTRACKING_ZOOM_EDGE_THRESHOLD * camera_width}, right: {bb_right > (1 - AUTOTRACKING_ZOOM_EDGE_THRESHOLD) * camera_width}, top: {bb_top < AUTOTRACKING_ZOOM_EDGE_THRESHOLD * camera_height}, bottom: {bb_bottom > (1 - AUTOTRACKING_ZOOM_EDGE_THRESHOLD) * camera_height}"
)
logger.debug(
f"{camera}: Zoom test: below distance threshold: {(below_distance_threshold)}"
)
logger.debug(
f"{camera}: Zoom test: below area threshold: {(below_area_threshold)} target: {self.tracked_object_metrics[camera]['target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']}"
f"{camera}: Zoom test: below area threshold: {(below_area_threshold)} target: {self.tracked_object_metrics[camera]['target_box']}, calculated: {calculated_target_box}, max: {self.tracked_object_metrics[camera]['max_target_box']}"
)
logger.debug(
f"{camera}: Zoom test: below dimension threshold: {below_dimension_threshold} width: {bb_right - bb_left}, max width: {camera_width * (self.zoom_factor[camera] + 0.1)}, height: {bb_bottom - bb_top}, max height: {camera_height * (self.zoom_factor[camera] + 0.1)}"
@@ -919,10 +976,10 @@ class PtzAutoTracker:
logger.debug(f"{camera}: Zoom test: at max zoom: {at_max_zoom}")
logger.debug(f"{camera}: Zoom test: at min zoom: {at_min_zoom}")
logger.debug(
f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {self.tracked_object_metrics[camera]["target_box"]}'
f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
)
logger.debug(
f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {self.tracked_object_metrics[camera]["target_box"]}'
f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
)
# Zoom in conditions (and)
@@ -961,6 +1018,7 @@ class PtzAutoTracker:
camera_width = camera_config.frame_shape[1]
camera_height = camera_config.frame_shape[0]
camera_fps = camera_config.detect.fps
predicted_movement_time = 0
average_velocity = np.zeros((4,))
predicted_box = obj.obj_data["box"]
@@ -1010,7 +1068,9 @@ class PtzAutoTracker:
f"{camera}: Velocity: {tuple(np.round(average_velocity).flatten().astype(int))}"
)
zoom = self._get_zoom_amount(camera, obj, predicted_box, debug_zoom=True)
zoom = self._get_zoom_amount(
camera, obj, predicted_box, predicted_movement_time, debug_zoom=True
)
self._enqueue_move(camera, obj.obj_data["frame_time"], pan, tilt, zoom)
@@ -1018,12 +1078,14 @@ class PtzAutoTracker:
camera_config = self.config.cameras[camera]
if camera_config.onvif.autotracking.zooming != ZoomingModeEnum.disabled:
zoom = self._get_zoom_amount(camera, obj, obj.obj_data["box"])
zoom = self._get_zoom_amount(camera, obj, obj.obj_data["box"], 0)
if zoom != 0:
self._enqueue_move(camera, obj.obj_data["frame_time"], 0, 0, zoom)
def _get_zoom_amount(self, camera, obj, predicted_box, debug_zoom=True):
def _get_zoom_amount(
self, camera, obj, predicted_box, predicted_movement_time, debug_zoom=True
):
camera_config = self.config.cameras[camera]
# frame width and height
@@ -1046,7 +1108,11 @@ class PtzAutoTracker:
else:
if (
result := self._should_zoom_in(
camera, obj, obj.obj_data["box"], debug_zoom
camera,
obj,
obj.obj_data["box"],
predicted_movement_time,
debug_zoom,
)
) is not None:
# divide zoom in 10 increments and always zoom out more than in
@@ -1077,13 +1143,27 @@ class PtzAutoTracker:
predicted_box
if camera_config.onvif.autotracking.movement_weights
else obj.obj_data["box"],
predicted_movement_time,
debug_zoom,
)
) is not None:
if predicted_movement_time:
calculated_target_box = self.tracked_object_metrics[camera][
"target_box"
] + self._predict_area_after_time(
camera, predicted_movement_time
) / (camera_width * camera_height)
logger.debug(
f"{camera}: Zooming prediction: predicted movement time: {predicted_movement_time}, original box: {self.tracked_object_metrics[camera]['target_box']}, calculated box: {calculated_target_box}"
)
else:
calculated_target_box = self.tracked_object_metrics[camera][
"target_box"
]
# zoom value
ratio = (
self.tracked_object_metrics[camera]["max_target_box"]
/ self.tracked_object_metrics[camera]["target_box"]
/ calculated_target_box
)
zoom = (ratio - 1) / (ratio + 1)
logger.debug(
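The ratio-to-zoom mapping above is worth a quick worked example (numbers illustrative): it squashes any positive ratio into (-1, 1), zooming in when the tracked box is smaller than the target and out when it is larger.

def zoom_amount(max_target_box: float, calculated_target_box: float) -> float:
    # Same mapping as above; the result always lands inside (-1, 1).
    ratio = max_target_box / calculated_target_box
    return (ratio - 1) / (ratio + 1)

print(zoom_amount(0.3, 0.1))  # 0.5       -> box well under target, zoom in
print(zoom_amount(0.3, 0.6))  # -0.333... -> box too large, zoom out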
@@ -1151,28 +1231,28 @@ class PtzAutoTracker:
and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"]
and obj.obj_data["frame_time"]
!= self.tracked_object_history[camera][-1]["frame_time"]
and not ptz_moving_at_frame_time(
obj.obj_data["frame_time"],
self.ptz_metrics[camera]["ptz_start_time"].value,
self.ptz_metrics[camera]["ptz_stop_time"].value,
)
):
self.tracked_object_history[camera].append(copy.deepcopy(obj.obj_data))
self._calculate_tracked_object_metrics(camera, obj)
if self.tracked_object_metrics[camera]["below_distance_threshold"]:
logger.debug(
f"{camera}: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
if not ptz_moving_at_frame_time(
obj.obj_data["frame_time"],
self.ptz_metrics[camera]["ptz_start_time"].value,
self.ptz_metrics[camera]["ptz_stop_time"].value,
):
if self.tracked_object_metrics[camera]["below_distance_threshold"]:
logger.debug(
f"{camera}: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
# no need to move, but try zooming
self._autotrack_move_zoom_only(camera, obj)
else:
logger.debug(
f"{camera}: Existing object (need to move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
# no need to move, but try zooming
self._autotrack_move_zoom_only(camera, obj)
else:
logger.debug(
f"{camera}: Existing object (need to move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
self._autotrack_move_ptz(camera, obj)
self._autotrack_move_ptz(camera, obj)
return

View File

@@ -538,18 +538,16 @@ class OnvifController:
pan_tilt_status = getattr(status, "MoveStatus", None)
# we're unsupported
if pan_tilt_status is None or pan_tilt_status.lower() not in [
"idle",
"moving",
if pan_tilt_status is None or pan_tilt_status not in [
"IDLE",
"MOVING",
]:
logger.error(
f"Camera {camera_name} does not support the ONVIF GetStatus method. Autotracking will not function correctly and must be disabled in your config."
)
return
if pan_tilt_status.lower() == "idle" and (
zoom_status is None or zoom_status.lower() == "idle"
):
if pan_tilt_status == "IDLE" and (zoom_status is None or zoom_status == "IDLE"):
self.cams[camera_name]["active"] = False
if not self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set():
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set()

View File

@@ -7,9 +7,9 @@ import threading
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
from frigate.config import FrigateConfig, RetainModeEnum
from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.models import Event, Previews, Recordings
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
@@ -33,10 +33,152 @@ class RecordingCleanup(threading.Thread):
logger.debug("Deleting tmp clip.")
clear_and_unlink(p)
def expire_existing_camera_recordings(
self, expire_date: float, config: CameraConfig, events: Event
) -> None:
"""Delete recordings for existing camera based on retention config."""
# Get the timestamp for cutoff of retained days
# Get recordings to check for expiration
recordings: Recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.path,
Recordings.objects,
Recordings.motion,
)
.where(
Recordings.camera == config.name,
Recordings.end_time < expire_date,
)
.order_by(Recordings.start_time)
.namedtuples()
.iterator()
)
# loop over recordings and see if they overlap with any non-expired events
# TODO: expire segments based on segment stats according to config
event_start = 0
deleted_recordings = set()
kept_recordings: list[tuple[float, float]] = []
for recording in recordings:
keep = False
# Now look for a reason to keep this recording segment
for idx in range(event_start, len(events)):
event: Event = events[idx]
# if the event starts in the future, stop checking events
# and let this recording segment expire
if event.start_time > recording.end_time:
keep = False
break
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if event.end_time is None or event.end_time >= recording.start_time:
keep = True
break
# if the event ends before this recording segment starts, skip
# this event and check the next event for an overlap.
# since the events and recordings are sorted, we can skip events
# that end before the previous recording segment started on future segments
if event.end_time < recording.start_time:
event_start = idx
# Delete recordings outside of the retention window or based on the retention mode
if (
not keep
or (
config.record.events.retain.mode == RetainModeEnum.motion
and recording.motion == 0
)
or (
config.record.events.retain.mode == RetainModeEnum.active_objects
and recording.objects == 0
)
):
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
else:
kept_recordings.append((recording.start_time, recording.end_time))
# expire recordings
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_recordings_list = list(deleted_recordings)
for i in range(0, len(deleted_recordings_list), max_deletes):
Recordings.delete().where(
Recordings.id << deleted_recordings_list[i : i + max_deletes]
).execute()
previews: Previews = (
Previews.select(
Previews.id,
Previews.start_time,
Previews.end_time,
Previews.path,
)
.where(
Previews.camera == config.name,
Previews.end_time < expire_date,
)
.order_by(Previews.start_time)
.namedtuples()
.iterator()
)
# expire previews
recording_start = 0
deleted_previews = set()
for preview in previews:
keep = False
# look for a reason to keep this preview
for idx in range(recording_start, len(kept_recordings)):
start_time, end_time = kept_recordings[idx]
# if the recording starts in the future, stop checking recordings
# and let this preview expire
if start_time > preview.end_time:
keep = False
break
# if the recording ends after the preview starts, keep it
# and stop looking at recordings
if end_time >= preview.start_time:
keep = True
break
# if the recording ends before this preview starts, skip
# this recording and check the next recording for an overlap.
# since the kept recordings and previews are sorted, we can skip recordings
# that end before the current preview started
if end_time < preview.start_time:
recording_start = idx
# Delete previews without any relevant recordings
if not keep:
Path(preview.path).unlink(missing_ok=True)
deleted_previews.add(preview.id)
# expire previews
logger.debug(f"Expiring {len(deleted_previews)} previews")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_previews_list = list(deleted_previews)
for i in range(0, len(deleted_previews_list), max_deletes):
Previews.delete().where(
Previews.id << deleted_previews_list[i : i + max_deletes]
).execute()
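Both passes above (recordings against events, then previews against kept recordings) are the same sorted two-pointer sweep: advance the keeper index past intervals that end before the candidate starts, and keep the candidate on the first overlap. A generic standalone sketch of the pattern (not Frigate's API; interval values are invented):

def sweep_keep(candidates, keepers):
    """candidates and keepers are (start, end) tuples sorted by start time.
    Returns the candidates that overlap at least one keeper interval."""
    kept = []
    k = 0
    for c_start, c_end in candidates:
        # skip keepers that ended before this candidate began
        while k < len(keepers) and keepers[k][1] < c_start:
            k += 1
        # after the skip, keepers[k] ends at or after c_start, so a single
        # start check decides the overlap
        if k < len(keepers) and keepers[k][0] <= c_end:
            kept.append((c_start, c_end))
    return kept

# e.g. recording segments swept against events kept at (10, 20) and (40, 50):
print(sweep_keep([(0, 5), (18, 25), (30, 35)], [(10, 20), (40, 50)]))
# -> [(18, 25)]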
def expire_recordings(self) -> None:
"""Delete recordings based on retention config."""
logger.debug("Start expire recordings.")
logger.debug("Start deleted cameras.")
# Handle deleted cameras
expire_days = self.config.record.retain.days
expire_before = (
@@ -73,31 +215,12 @@ class RecordingCleanup(threading.Thread):
logger.debug("Start all cameras.")
for camera, config in self.config.cameras.items():
logger.debug(f"Start camera: {camera}.")
# Get the timestamp for cutoff of retained days
expire_days = config.record.retain.days
expire_date = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# Get recordings to check for expiration
recordings: Recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.path,
Recordings.objects,
Recordings.motion,
)
.where(
Recordings.camera == camera,
Recordings.end_time < expire_date,
)
.order_by(Recordings.start_time)
.namedtuples()
.iterator()
)
# Get all the events to check against
events: Event = (
Event.select(
@@ -115,60 +238,7 @@ class RecordingCleanup(threading.Thread):
.namedtuples()
)
# loop over recordings and see if they overlap with any non-expired events
# TODO: expire segments based on segment stats according to config
event_start = 0
deleted_recordings = set()
for recording in recordings:
keep = False
# Now look for a reason to keep this recording segment
for idx in range(event_start, len(events)):
event: Event = events[idx]
# if the event starts in the future, stop checking events
# and let this recording segment expire
if event.start_time > recording.end_time:
keep = False
break
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if event.end_time is None or event.end_time >= recording.start_time:
keep = True
break
# if the event ends before this recording segment starts, skip
# this event and check the next event for an overlap.
# since the events and recordings are sorted, we can skip events
# that end before the previous recording segment started on future segments
if event.end_time < recording.start_time:
event_start = idx
# Delete recordings outside of the retention window or based on the retention mode
if (
not keep
or (
config.record.events.retain.mode == RetainModeEnum.motion
and recording.motion == 0
)
or (
config.record.events.retain.mode
== RetainModeEnum.active_objects
and recording.objects == 0
)
):
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_recordings_list = list(deleted_recordings)
for i in range(0, len(deleted_recordings_list), max_deletes):
Recordings.delete().where(
Recordings.id << deleted_recordings_list[i : i + max_deletes]
).execute()
self.expire_existing_camera_recordings(expire_date, config, events)
logger.debug(f"End camera: {camera}.")
logger.debug("End all cameras.")

View File

@@ -2,7 +2,7 @@
import unittest
from frigate.output import get_canvas_shape
from frigate.output.birdseye import get_canvas_shape
class TestBirdseye(unittest.TestCase):

View File

@@ -653,7 +653,7 @@ class TestConfig(unittest.TestCase):
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect", "rtmp"],
"roles": ["detect"],
},
{"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]},
]
@@ -930,7 +930,7 @@ class TestConfig(unittest.TestCase):
"width": 1920,
"fps": 5,
},
"rtmp": {"enabled": True},
"audio": {"enabled": True},
}
},
}
@@ -1167,122 +1167,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].snapshots.height == 150
assert runtime_config.cameras["back"].snapshots.enabled
def test_global_rtmp_disabled(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].rtmp.enabled
def test_default_not_rtmp(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"rtmp": {"enabled": False},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect", "rtmp"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
"rtmp": {
"enabled": True,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_default(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect"],
},
{
"path": "rtsp://10.0.0.1:554/video2",
"roles": ["record"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_jsmpeg(self):
config = {
"mqtt": {"host": "mqtt"},
@@ -1428,7 +1312,6 @@ class TestConfig(unittest.TestCase):
def test_global_timestamp_style_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"rtmp": {"enabled": False},
"timestamp_style": {"position": "br", "thickness": 2},
"cameras": {
"back": {

View File

@@ -14,13 +14,12 @@ class TestFfmpegPresets(unittest.TestCase):
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect", "rtmp"],
"roles": ["detect"],
}
],
"output_args": {
"detect": "-f rawvideo -pix_fmt yuv420p",
"record": "-f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an",
"rtmp": "-c copy -f flv",
},
},
"detect": {
@@ -31,9 +30,6 @@ class TestFfmpegPresets(unittest.TestCase):
"record": {
"enabled": True,
},
"rtmp": {
"enabled": True,
},
"name": "back",
}
},
@@ -157,29 +153,6 @@ class TestFfmpegPresets(unittest.TestCase):
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
def test_ffmpeg_output_rtmp_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
"rtmp"
] = "preset-rtmp-jpeg"
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "preset-rtmp-jpeg" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
assert "-c:v libx264" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
def test_ffmpeg_output_rtmp_not_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
"rtmp"
] = "-some output"
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "-some output" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
if __name__ == "__main__":
unittest.main(verbosity=2)

View File

@@ -28,6 +28,7 @@ class TimelineProcessor(threading.Thread):
self.config = config
self.queue = queue
self.stop_event = stop_event
self.pre_event_cache: dict[str, list[dict[str, any]]] = {}
def run(self) -> None:
while not self.stop_event.is_set():
@@ -46,6 +47,32 @@ class TimelineProcessor(threading.Thread):
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
elif input_type == EventTypeEnum.api:
self.handle_api_entry(camera, event_type, event_data)
def insert_or_save(
self,
entry: dict[str, any],
prev_event_data: dict[any, any],
event_data: dict[any, any],
) -> None:
"""Insert into db or cache."""
id = entry[Timeline.source_id]
if not event_data["has_clip"] and not event_data["has_snapshot"]:
# the related event has not been saved yet, should be added to cache
if id in self.pre_event_cache.keys():
self.pre_event_cache[id].append(entry)
else:
self.pre_event_cache[id] = [entry]
else:
# the event is saved, insert to db and insert cached into db
if id in self.pre_event_cache.keys():
for e in self.pre_event_cache[id]:
Timeline.insert(e).execute()
self.pre_event_cache.pop(id)
Timeline.insert(entry).execute()
def handle_object_detection(
self,
@@ -53,15 +80,17 @@ class TimelineProcessor(threading.Thread):
event_type: str,
prev_event_data: dict[any, any],
event_data: dict[any, any],
) -> None:
) -> bool:
"""Handle object detection."""
save = False
camera_config = self.config.cameras[camera]
event_id = event_data["id"]
timeline_entry = {
Timeline.timestamp: event_data["frame_time"],
Timeline.camera: camera,
Timeline.source: "tracked_object",
Timeline.source_id: event_data["id"],
Timeline.source_id: event_id,
Timeline.data: {
"box": to_relative_box(
camera_config.detect.width,
@@ -69,6 +98,7 @@ class TimelineProcessor(threading.Thread):
event_data["box"],
),
"label": event_data["label"],
"sub_label": event_data.get("sub_label"),
"region": to_relative_box(
camera_config.detect.width,
camera_config.detect.height,
@@ -77,36 +107,78 @@ class TimelineProcessor(threading.Thread):
"attribute": "",
},
}
# update sub labels for existing entries that haven't been added yet
if (
prev_event_data != None
and prev_event_data["sub_label"] != event_data["sub_label"]
and event_id in self.pre_event_cache.keys()
):
for e in self.pre_event_cache[event_id]:
e[Timeline.data]["sub_label"] = event_data["sub_label"]
if event_type == "start":
timeline_entry[Timeline.class_type] = "visible"
Timeline.insert(timeline_entry).execute()
save = True
elif event_type == "update":
# zones have been updated
if (
prev_event_data["current_zones"] != event_data["current_zones"]
and len(event_data["current_zones"]) > 0
len(prev_event_data["current_zones"]) < len(event_data["current_zones"])
and not event_data["stationary"]
):
timeline_entry[Timeline.class_type] = "entered_zone"
timeline_entry[Timeline.data]["zones"] = event_data["current_zones"]
Timeline.insert(timeline_entry).execute()
save = True
elif prev_event_data["stationary"] != event_data["stationary"]:
timeline_entry[Timeline.class_type] = (
"stationary" if event_data["stationary"] else "active"
)
Timeline.insert(timeline_entry).execute()
save = True
elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
timeline_entry[Timeline.class_type] = "attribute"
timeline_entry[Timeline.data]["attribute"] = list(
event_data["attributes"].keys()
)[0]
Timeline.insert(timeline_entry).execute()
save = True
elif event_type == "end":
if event_data["has_clip"] or event_data["has_snapshot"]:
timeline_entry[Timeline.class_type] = "gone"
Timeline.insert(timeline_entry).execute()
else:
# if event was not saved then the timeline entries should be deleted
Timeline.delete().where(
Timeline.source_id == event_data["id"]
).execute()
timeline_entry[Timeline.class_type] = "gone"
save = True
if save:
self.insert_or_save(timeline_entry, prev_event_data, event_data)
def handle_api_entry(
self,
camera: str,
event_type: str,
event_data: dict[any, any],
) -> bool:
if event_type != "new":
return False
if event_data.get("type", "api") == "audio":
timeline_entry = {
Timeline.class_type: "heard",
Timeline.timestamp: event_data["start_time"],
Timeline.camera: camera,
Timeline.source: "audio",
Timeline.source_id: event_data["id"],
Timeline.data: {
"label": event_data["label"],
"sub_label": event_data.get("sub_label"),
},
}
else:
timeline_entry = {
Timeline.class_type: "external",
Timeline.timestamp: event_data["start_time"],
Timeline.camera: camera,
Timeline.source: "api",
Timeline.source_id: event_data["id"],
Timeline.data: {
"label": event_data["label"],
"sub_label": event_data.get("sub_label"),
},
}
Timeline.insert(timeline_entry).execute()
return True

View File

@@ -387,6 +387,7 @@ def copy_yuv_to_position(
destination_shape,
source_frame=None,
source_channel_dim=None,
interpolation=cv2.INTER_LINEAR,
):
# get the coordinates of the channels for this position in the layout
y, u1, u2, v1, v2 = get_yuv_crop(
@@ -435,7 +436,6 @@ def copy_yuv_to_position(
uv_y_offset = y_y_offset // 4
uv_x_offset = y_x_offset // 2
interpolation = cv2.INTER_LINEAR
# resize/copy y channel
destination_frame[
y[1] + y_y_offset : y[1] + y_y_offset + y_resize_height,

View File

@@ -13,8 +13,10 @@ from typing import Optional
import cv2
import psutil
import py3nvml.py3nvml as nvml
import requests
from frigate.util.builtin import escape_special_characters
from frigate.const import FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI
from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
logger = logging.getLogger(__name__)
@@ -134,7 +136,7 @@ def get_cpu_stats() -> dict[str, dict]:
"cpu": str(cpu_percent),
"cpu_average": str(round(cpu_average_usage, 2)),
"mem": f"{mem_pct}",
"cmdline": " ".join(cmdline),
"cmdline": clean_camera_user_pass(" ".join(cmdline)),
}
except Exception:
continue
@@ -371,7 +373,39 @@ def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
return sp.run(ffprobe_cmd, capture_output=True)
async def get_video_properties(url, get_duration=False):
def auto_detect_hwaccel() -> str:
"""Detect hwaccel args by default."""
try:
cuda = False
vaapi = False
resp = requests.get("http://127.0.0.1:1984/api/ffmpeg/hardware", timeout=3)
if resp.status_code == 200:
data: dict[str, list[dict[str, str]]] = resp.json()
for source in data.get("sources", []):
if "cuda" in source.get("url", "") and source.get("name") == "OK":
cuda = True
if "vaapi" in source.get("url", "") and source.get("name") == "OK":
vaapi = True
except requests.RequestException:
pass
if cuda:
logger.info("Automatically detected nvidia hwaccel for video decoding")
return FFMPEG_HWACCEL_NVIDIA
if vaapi:
logger.info("Automatically detected vaapi hwaccel for video decoding")
return FFMPEG_HWACCEL_VAAPI
logger.warning(
"Did not detect hwaccel, using a GPU for accelerated video decoding is highly recommended"
)
return ""
async def get_video_properties(url, get_duration=False) -> dict[str, any]:
async def calculate_duration(video: Optional[any]) -> float:
duration = None
@@ -405,7 +439,10 @@ async def get_video_properties(url, get_duration=False):
result = None
if result:
duration = float(result.strip())
try:
duration = float(result.strip())
except ValueError:
duration = -1
else:
duration = -1

View File

@@ -0,0 +1,35 @@
"""Peewee migrations -- 021_create_previews_table.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import peewee as pw
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql(
'CREATE TABLE IF NOT EXISTS "previews" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "camera" VARCHAR(20) NOT NULL, "path" VARCHAR(255) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "duration" REAL NOT NULL)'
)
def rollback(migrator, database, fake=False, **kwargs):
pass

View File

@@ -262,7 +262,6 @@ def process(path, label, output, debug_path):
}
]
},
"rtmp": {"enabled": False},
"record": {"enabled": False},
}
},

25
web-old/.gitignore vendored Normal file
View File

@@ -0,0 +1,25 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
.npm

Binary file not shown. (new image, 3.9 KiB)

Binary file not shown. (new image, 558 B)

Binary file not shown. (new image, 800 B)

BIN
web-old/images/favicon.ico Normal file

Binary file not shown. (new image, 15 KiB)

BIN
web-old/images/favicon.png Normal file

Binary file not shown. (new image, 12 KiB)

View File

@@ -0,0 +1,46 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
width="888.000000pt" height="888.000000pt" viewBox="0 0 888.000000 888.000000"
preserveAspectRatio="xMidYMid meet">
<metadata>
Created by potrace 1.11, written by Peter Selinger 2001-2013
</metadata>
<g transform="translate(0.000000,888.000000) scale(0.100000,-0.100000)"
fill="#000000" stroke="none">
<path d="M8228 8865 c-2 -2 -25 -6 -53 -9 -38 -5 -278 -56 -425 -91 -33 -7
-381 -98 -465 -121 -49 -14 -124 -34 -165 -45 -67 -18 -485 -138 -615 -176
-50 -14 -106 -30 -135 -37 -8 -2 -35 -11 -60 -19 -25 -8 -85 -27 -135 -42 -49
-14 -101 -31 -115 -36 -14 -5 -34 -11 -45 -13 -11 -3 -65 -19 -120 -36 -55
-18 -127 -40 -160 -50 -175 -53 -247 -77 -550 -178 -364 -121 -578 -200 -820
-299 -88 -36 -214 -88 -280 -115 -66 -27 -129 -53 -140 -58 -11 -5 -67 -29
-125 -54 -342 -144 -535 -259 -579 -343 -34 -66 7 -145 156 -299 229 -238 293
-316 340 -413 38 -80 41 -152 10 -281 -57 -234 -175 -543 -281 -732 -98 -174
-172 -239 -341 -297 -116 -40 -147 -52 -210 -80 -107 -49 -179 -107 -290 -236
-51 -59 -179 -105 -365 -131 -19 -2 -48 -7 -65 -9 -16 -3 -50 -8 -75 -11 -69
-9 -130 -39 -130 -63 0 -24 31 -46 78 -56 18 -4 139 -8 270 -10 250 -4 302
-11 335 -44 19 -18 19 -23 7 -46 -19 -36 -198 -121 -490 -233 -850 -328 -914
-354 -1159 -473 -185 -90 -337 -186 -395 -249 -60 -65 -67 -107 -62 -350 3
-113 7 -216 10 -230 3 -14 7 -52 10 -85 7 -70 14 -128 21 -170 2 -16 7 -48 10
-70 3 -22 11 -64 16 -94 6 -30 12 -64 14 -75 1 -12 5 -34 9 -51 3 -16 8 -39
10 -50 12 -57 58 -258 71 -310 9 -33 18 -69 20 -79 25 -110 138 -416 216 -582
21 -47 39 -87 39 -90 0 -7 217 -438 261 -521 109 -201 293 -501 347 -564 11
-13 37 -44 56 -68 69 -82 126 -109 160 -75 26 25 14 65 -48 164 -138 218 -142
245 -138 800 2 206 4 488 5 625 1 138 -1 293 -6 345 -28 345 -28 594 -1 760
12 69 54 187 86 235 33 52 188 212 293 302 98 84 108 93 144 121 19 15 52 42
75 61 78 64 302 229 426 313 248 169 483 297 600 326 53 14 205 6 365 -17 33
-5 155 -8 270 -6 179 3 226 7 316 28 58 13 140 25 182 26 82 2 120 6 217 22
73 12 97 16 122 18 12 1 23 21 38 70 l20 68 74 -17 c81 -20 155 -30 331 -45
69 -6 132 -8 715 -20 484 -11 620 -8 729 16 85 19 131 63 98 96 -25 26 -104
34 -302 32 -373 -2 -408 -1 -471 26 -90 37 2 102 171 120 33 3 76 8 95 10 19
2 71 7 115 10 243 17 267 20 338 37 145 36 47 102 -203 137 -136 19 -262 25
-490 22 -124 -2 -362 -4 -530 -4 l-305 -1 -56 26 c-65 31 -171 109 -238 176
-52 51 -141 173 -141 191 0 6 -6 22 -14 34 -18 27 -54 165 -64 244 -12 98 -6
322 12 414 9 47 29 127 45 176 26 80 58 218 66 278 1 11 6 47 10 80 3 33 8 70
10 83 2 13 7 53 11 90 3 37 8 74 9 83 22 118 22 279 -1 464 -20 172 -20 172
70 238 108 79 426 248 666 355 25 11 77 34 115 52 92 42 443 191 570 242 55
22 109 44 120 48 24 11 130 52 390 150 199 75 449 173 500 195 17 7 118 50
225 95 237 100 333 143 490 220 229 113 348 191 337 223 -3 10 -70 20 -79 12z"/>
</g>
</svg>


BIN
web-old/images/marker.png Normal file
Binary file not shown (new image, 534 B).

Binary file not shown (new image, 2.6 KiB).

26
web-old/index.html Normal file
View File

@ -0,0 +1,26 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" href="/images/favicon.ico" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Frigate</title>
<link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon.png" />
<link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png" />
<link rel="icon" type="image/svg+xml" href="/images/favicon.svg" />
<link rel="manifest" href="/site.webmanifest" />
<link rel="mask-icon" href="/images/favicon.svg" color="#3b82f7" />
<meta name="msapplication-TileColor" content="#3b82f7" />
<meta name="theme-color" content="#ffffff" media="(prefers-color-scheme: light)" />
<meta name="theme-color" content="#111827" media="(prefers-color-scheme: dark)" />
</head>
<body>
<div id="app" class="z-0"></div>
<div id="dialogs" class="z-0"></div>
<div id="menus" class="z-0"></div>
<div id="tooltips" class="z-0"></div>
<noscript>You need to enable JavaScript to run this app.</noscript>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

16624
web-old/package-lock.json generated Normal file

File diff suppressed because it is too large.

62
web-old/package.json Normal file
View File

@ -0,0 +1,62 @@
{
"name": "frigate",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite --host",
"build": "tsc && vite build --base=/BASE_PATH/",
"lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore .",
"prettier:write": "prettier -u -w --ignore-path .gitignore \"*.{ts,tsx,js,jsx,css,html}\"",
"test": "vitest",
"coverage": "vitest run --coverage"
},
"dependencies": {
"@cycjimmy/jsmpeg-player": "^6.0.5",
"axios": "^1.5.0",
"copy-to-clipboard": "3.3.3",
"date-fns": "^2.30.0",
"idb-keyval": "^6.2.0",
"immer": "^10.0.1",
"monaco-yaml": "^4.0.4",
"preact": "^10.17.1",
"preact-async-route": "^2.2.1",
"preact-router": "^4.1.0",
"react": "npm:@preact/compat@^17.1.2",
"react-dom": "npm:@preact/compat@^17.1.2",
"react-use-websocket": "^3.0.0",
"strftime": "^0.10.1",
"swr": "^1.3.0",
"video.js": "^8.5.2",
"videojs-playlist": "^5.1.0",
"vite-plugin-monaco-editor": "^1.1.0"
},
"devDependencies": {
"@preact/preset-vite": "^2.5.0",
"@tailwindcss/forms": "^0.5.6",
"@testing-library/jest-dom": "^6.1.2",
"@testing-library/preact": "^3.2.3",
"@testing-library/user-event": "^14.4.3",
"@typescript-eslint/eslint-plugin": "^6.5.0",
"@typescript-eslint/parser": "^6.5.0",
"@vitest/coverage-v8": "^0.34.3",
"@vitest/ui": "^0.34.3",
"autoprefixer": "^10.4.15",
"eslint": "^8.48.0",
"eslint-config-preact": "^1.3.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-jest": "^27.2.3",
"eslint-plugin-prettier": "^5.0.0",
"eslint-plugin-vitest-globals": "^1.4.0",
"fake-indexeddb": "^4.0.1",
"jest-websocket-mock": "^2.5.0",
"jsdom": "^22.0.0",
"msw": "^1.2.1",
"postcss": "^8.4.29",
"prettier": "^3.0.3",
"tailwindcss": "^3.3.2",
"typescript": "^5.2.2",
"vite": "^4.4.9",
"vitest": "^0.34.3"
}
}

Binary image changed; preview not shown (before: 3.1 KiB, after: 3.1 KiB).

Binary image changed; preview not shown (before: 6.9 KiB, after: 6.9 KiB).

Some files were not shown because too many files have changed in this diff.