Merge branch 'onvif-configs-checks' of https://github.com/hawkeye217/frigate into onvif-configs-checks

Josh Hawkins 2024-02-03 12:56:57 -06:00
commit 64ad71acf0
345 changed files with 33269 additions and 12173 deletions


@@ -10,7 +10,7 @@
 "features": {
 "ghcr.io/devcontainers/features/common-utils:1": {}
 },
-"forwardPorts": [5000, 5001, 5173, 1935, 8554, 8555],
+"forwardPorts": [5000, 5001, 5173, 8554, 8555],
 "portsAttributes": {
 "5000": {
 "label": "NGINX",
@@ -24,10 +24,6 @@
 "label": "Vite Server",
 "onAutoForward": "silent"
 },
-"1935": {
-"label": "RTMP",
-"onAutoForward": "silent"
-},
 "8554": {
 "label": "gortc RTSP",
 "onAutoForward": "silent"


@@ -40,9 +40,9 @@ jobs:
 node-version: 16.x
 - run: npm install
 working-directory: ./web
-- name: Lint
-run: npm run lint
-working-directory: ./web
+# - name: Lint
+# run: npm run lint
+# working-directory: ./web
 web_test:
 name: Web - Test
@@ -51,12 +51,12 @@ jobs:
 - uses: actions/checkout@v4
 - uses: actions/setup-node@master
 with:
-node-version: 16.x
+node-version: 20.x
 - run: npm install
 working-directory: ./web
-- name: Test
-run: npm run test
-working-directory: ./web
+# - name: Test
+# run: npm run test
+# working-directory: ./web
 python_checks:
 runs-on: ubuntu-latest

.gitignore vendored

@@ -8,7 +8,6 @@ config/*
 !config/*.example
 models
 *.mp4
-*.ts
 *.db
 *.csv
 frigate/version.py


@@ -191,7 +191,6 @@ COPY --from=deps-rootfs / /
 COPY --from=deps-rootfs / /
 RUN ldconfig
 EXPOSE 5000
-EXPOSE 1935
 EXPOSE 8554
 EXPOSE 8555/tcp 8555/udp
@@ -237,7 +236,7 @@ CMD ["sleep", "infinity"]
 # Frigate web build
 # This should be architecture agnostic, so speed up the build on multiarch by not using QEMU.
-FROM --platform=$BUILDPLATFORM node:16 AS web-build
+FROM --platform=$BUILDPLATFORM node:20 AS web-build
 WORKDIR /work
 COPY web/package.json web/package-lock.json ./


@@ -5,7 +5,6 @@ set -euxo pipefail
 NGINX_VERSION="1.25.3"
 VOD_MODULE_VERSION="1.31"
 SECURE_TOKEN_MODULE_VERSION="1.5"
-RTMP_MODULE_VERSION="1.2.2"
 cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
 sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
@@ -49,10 +48,6 @@ mkdir /tmp/nginx-secure-token-module
 wget https://github.com/kaltura/nginx-secure-token-module/archive/refs/tags/${SECURE_TOKEN_MODULE_VERSION}.tar.gz
 tar -zxf ${SECURE_TOKEN_MODULE_VERSION}.tar.gz -C /tmp/nginx-secure-token-module --strip-components=1
 rm ${SECURE_TOKEN_MODULE_VERSION}.tar.gz
-mkdir /tmp/nginx-rtmp-module
-wget -nv https://github.com/arut/nginx-rtmp-module/archive/refs/tags/v${RTMP_MODULE_VERSION}.tar.gz
-tar -zxf v${RTMP_MODULE_VERSION}.tar.gz -C /tmp/nginx-rtmp-module --strip-components=1
-rm v${RTMP_MODULE_VERSION}.tar.gz
 cd /tmp/nginx
@@ -63,7 +58,6 @@ cd /tmp/nginx
 --with-threads \
 --add-module=../nginx-vod-module \
 --add-module=../nginx-secure-token-module \
---add-module=../nginx-rtmp-module \
 --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"
 make CC="ccache gcc" -j$(nproc) && make install


@@ -7,6 +7,7 @@ numpy == 1.23.*
 onvif_zeep == 0.2.12
 opencv-python-headless == 4.7.0.*
 paho-mqtt == 1.6.*
+pandas == 2.1.4
 peewee == 3.17.*
 peewee_migrate == 1.12.*
 psutil == 5.9.*


@@ -277,18 +277,3 @@ http {
 }
 }
 }
-rtmp {
-server {
-listen 1935;
-chunk_size 4096;
-allow publish 127.0.0.1;
-deny publish all;
-allow play all;
-application live {
-live on;
-record off;
-meta copy;
-}
-}
-}


@@ -69,16 +69,12 @@ cameras:
 ffmpeg:
 output_args:
 record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
-rtmp: -c:v copy -c:a aac -f flv
 inputs:
 - path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
 roles:
 - detect
 - record
-- rtmp
-rtmp:
-enabled: False # <-- RTMP should be disabled if your stream is not H264
 detect:
 width: # <- optional, by default Frigate tries to automatically detect resolution
 height: # <- optional, by default Frigate tries to automatically detect resolution
@@ -181,13 +177,12 @@ go2rtc:
 [See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-rtsp)
-In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect.
+In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.
 ```yaml
 ffmpeg:
 output_args:
 record: preset-record-ubiquiti
-rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead
 ```
 ### TP-Link VIGI Cameras


@@ -16,7 +16,6 @@ Each role can only be assigned to one input per camera. The options for roles ar
 | `detect` | Main feed for object detection. [docs](object_detectors.md) |
 | `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) |
 | `audio` | Feed for audio based detection. [docs](audio_detectors.md) |
-| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) |
 ```yaml
 mqtt:
@@ -29,7 +28,6 @@ cameras:
 - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
 roles:
 - detect
-- rtmp # <- deprecated, recommend using restream instead
 - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/live
 roles:
 - record


@@ -18,9 +18,7 @@ See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on
 | preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen |
 | preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead |
 | preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead |
-| preset-nvidia-h264 | Nvidia GPU with h264 stream | |
-| preset-nvidia-h265 | Nvidia GPU with h265 stream | |
-| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 |
+| preset-nvidia | Nvidia GPU | |
 | preset-jetson-h264 | Nvidia Jetson with h264 stream | |
 | preset-jetson-h265 | Nvidia Jetson with h265 stream | |
 | preset-rk-h264 | Rockchip MPP with h264 stream | Use image with *-rk suffix and privileged mode |


@@ -5,7 +5,9 @@ title: Hardware Acceleration
 # Hardware Acceleration
-It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
+It is highly recommended to use a GPU for hardware acceleration in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
+Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
 # Officially Supported


@@ -151,9 +151,9 @@ birdseye:
 ffmpeg:
 # Optional: global ffmpeg args (default: shown below)
 global_args: -hide_banner -loglevel warning -threads 2
-# Optional: global hwaccel args (default: shown below)
+# Optional: global hwaccel args (default: auto detect)
 # NOTE: See hardware acceleration docs for your specific device
-hwaccel_args: []
+hwaccel_args: "auto"
 # Optional: global input args (default: shown below)
 input_args: preset-rtsp-generic
 # Optional: global output args
@@ -162,8 +162,6 @@ ffmpeg:
 detect: -threads 2 -f rawvideo -pix_fmt yuv420p
 # Optional: output args for record streams (default: shown below)
 record: preset-record-generic
-# Optional: output args for rtmp streams (default: shown below)
-rtmp: preset-rtmp-generic
 # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
 # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once
 # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
@@ -325,6 +323,11 @@ record:
 # The -r (framerate) dictates how smooth the output video is.
 # So the args would be -vf setpts=0.02*PTS -r 30 in that case.
 timelapse_args: "-vf setpts=0.04*PTS -r 30"
+# Optional: Recording Preview Settings
+preview:
+# Optional: Quality of recording preview (default: shown below).
+# Options are: very_low, low, medium, high, very_high
+quality: medium
 # Optional: Event recording settings
 events:
 # Optional: Number of seconds before the event to include (default: shown below)
@@ -381,13 +384,6 @@ snapshots:
 # Optional: quality of the encoded jpeg, 0-100 (default: shown below)
 quality: 70
-# Optional: RTMP configuration
-# NOTE: RTMP is deprecated in favor of restream
-# NOTE: Can be overridden at the camera level
-rtmp:
-# Optional: Enable the RTMP stream (default: False)
-enabled: False
 # Optional: Restream configuration
 # Uses https://github.com/AlexxIT/go2rtc (v1.8.3)
 go2rtc:
@@ -444,14 +440,13 @@ cameras:
 # Required: the path to the stream
 # NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {}
 - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-# Required: list of roles for this stream. valid values are: audio,detect,record,rtmp
-# NOTICE: In addition to assigning the audio, record, and rtmp roles,
+# Required: list of roles for this stream. valid values are: audio,detect,record
+# NOTICE: In addition to assigning the audio, detect, and record roles
 # they must also be enabled in the camera config.
 roles:
 - audio
 - detect
 - record
-- rtmp
 # Optional: stream specific global args (default: inherit)
 # global_args:
 # Optional: stream specific hwaccel args (default: inherit)


@@ -38,10 +38,6 @@ go2rtc:
 **NOTE:** This does not apply to localhost requests, there is no need to provide credentials when using the restream as a source for frigate cameras.
-## RTMP (Deprecated)
-In previous Frigate versions RTMP was used for re-streaming. RTMP has disadvantages however including being incompatible with H.265, high bitrates, and certain audio codecs. RTMP is deprecated and it is recommended use the built in go2rtc config for restreaming.
 ## Reduce Connections To Camera
 Some cameras only support one active connection or you may just want to have a single connection open to the camera. The RTSP restream allows this to be possible.


@@ -295,7 +295,6 @@ docker run \
 --network=bridge \
 --privileged \
 --workdir=/opt/frigate \
--p 1935:1935 \
 -p 5000:5000 \
 -p 8554:8554 \
 -p 8555:8555 \


@@ -9,7 +9,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
 - WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
 - Live stream support for cameras in Home Assistant Integration
-- RTSP (instead of RTMP) relay for use with other consumers to reduce the number of connections to your camera streams
+- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
 # Setup a go2rtc stream


@@ -124,10 +124,6 @@ https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
 <a name="streams"></a>
-## RTMP stream
-RTMP is deprecated and it is recommended to switch to use RTSP restreams.
 ## RTSP stream
 In order for the live streams to function they need to be accessible on the RTSP


@@ -37,10 +37,17 @@ from frigate.events.external import ExternalEventProcessor
 from frigate.events.maintainer import EventProcessor
 from frigate.http import create_app
 from frigate.log import log_process, root_configurer
-from frigate.models import Event, Recordings, RecordingsToDelete, Regions, Timeline
+from frigate.models import (
+Event,
+Previews,
+Recordings,
+RecordingsToDelete,
+Regions,
+Timeline,
+)
 from frigate.object_detection import ObjectDetectProcess
 from frigate.object_processing import TrackedObjectProcessor
-from frigate.output import output_frames
+from frigate.output.output import output_frames
 from frigate.plus import PlusApi
 from frigate.ptz.autotrack import PtzAutoTrackerThread
 from frigate.ptz.onvif import OnvifController
@@ -369,7 +376,7 @@ class FrigateApp:
 60, 10 * len([c for c in self.config.cameras.values() if c.enabled])
 ),
 )
-models = [Event, Recordings, RecordingsToDelete, Regions, Timeline]
+models = [Event, Recordings, RecordingsToDelete, Previews, Regions, Timeline]
 self.db.bind(models)
 def init_stats(self) -> None:
@@ -488,6 +495,7 @@ class FrigateApp:
 args=(
 self.config,
 self.video_output_queue,
+self.inter_process_queue,
 self.camera_metrics,
 ),
 )


@@ -5,8 +5,8 @@ from abc import ABC, abstractmethod
 from typing import Any, Callable
 from frigate.config import BirdseyeModeEnum, FrigateConfig
-from frigate.const import INSERT_MANY_RECORDINGS, REQUEST_REGION_GRID
+from frigate.const import INSERT_MANY_RECORDINGS, INSERT_PREVIEW, REQUEST_REGION_GRID
-from frigate.models import Recordings
+from frigate.models import Previews, Recordings
 from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
 from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes
 from frigate.util.object import get_camera_regions_grid
@@ -102,6 +102,8 @@ class Dispatcher:
 max(self.config.model.width, self.config.model.height),
 )
 )
+elif topic == INSERT_PREVIEW:
+Previews.insert(payload).execute()
 else:
 self.publish(topic, payload, retain=False)
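The new `INSERT_PREVIEW` topic lets another process hand a finished preview clip to the database through the dispatcher. The producer side is not part of this hunk; a hypothetical payload, with column names taken from the `Previews` model added later in this commit (the id format and path are made up for illustration):

```python
# Hypothetical payload for the INSERT_PREVIEW topic. Column names come from the
# Previews model added in this commit; the id format and path are illustrative only.
preview_payload = {
    "id": "1706979600.0-front_door",  # assumed id format
    "camera": "front_door",
    "path": "/media/frigate/clips/previews/front_door/1706976000.0.mp4",
    "start_time": 1706976000.0,
    "end_time": 1706979600.0,
    "duration": 3600.0,
}

# The dispatcher branch above would then persist it with:
# Previews.insert(preview_payload).execute()
```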


@@ -30,7 +30,6 @@ from frigate.ffmpeg_presets import (
 parse_preset_hardware_acceleration_scale,
 parse_preset_input,
 parse_preset_output_record,
-parse_preset_output_rtmp,
 )
 from frigate.plus import PlusApi
 from frigate.util.builtin import (
@@ -40,7 +39,7 @@ from frigate.util.builtin import (
 load_config_with_no_duplicates,
 )
 from frigate.util.image import create_mask
-from frigate.util.services import get_video_properties
+from frigate.util.services import auto_detect_hwaccel, get_video_properties
 logger = logging.getLogger(__name__)
@@ -260,6 +259,20 @@ class RecordExportConfig(FrigateBaseModel):
 )
+class RecordQualityEnum(str, Enum):
+very_low = "very_low"
+low = "low"
+medium = "medium"
+high = "high"
+very_high = "very_high"
+class RecordPreviewConfig(FrigateBaseModel):
+quality: RecordQualityEnum = Field(
+default=RecordQualityEnum.medium, title="Quality of recording preview."
+)
 class RecordConfig(FrigateBaseModel):
 enabled: bool = Field(default=False, title="Enable record on all cameras.")
 sync_recordings: bool = Field(
@@ -278,6 +291,9 @@ class RecordConfig(FrigateBaseModel):
 export: RecordExportConfig = Field(
 default_factory=RecordExportConfig, title="Recording Export Config"
 )
+preview: RecordPreviewConfig = Field(
+default_factory=RecordPreviewConfig, title="Recording Preview Config"
+)
 enabled_in_config: Optional[bool] = Field(
 title="Keep track of original state of recording."
 )
@@ -565,7 +581,6 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
 "-pix_fmt",
 "yuv420p",
 ]
-RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-rtmp-generic"
 RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"
@@ -578,10 +593,6 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
 default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
 title="Record role FFmpeg output arguments.",
 )
-rtmp: Union[str, List[str]] = Field(
-default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT,
-title="RTMP role FFmpeg output arguments.",
-)
 class FfmpegConfig(FrigateBaseModel):
@@ -589,7 +600,7 @@ class FfmpegConfig(FrigateBaseModel):
 default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
 )
 hwaccel_args: Union[str, List[str]] = Field(
-default_factory=list, title="FFmpeg hardware acceleration arguments."
+default="auto", title="FFmpeg hardware acceleration arguments."
 )
 input_args: Union[str, List[str]] = Field(
 default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
@@ -607,7 +618,6 @@ class FfmpegConfig(FrigateBaseModel):
 class CameraRoleEnum(str, Enum):
 audio = "audio"
 record = "record"
-rtmp = "rtmp"
 detect = "detect"
@@ -716,10 +726,6 @@ class CameraMqttConfig(FrigateBaseModel):
 )
-class RtmpConfig(FrigateBaseModel):
-enabled: bool = Field(default=False, title="RTMP restreaming enabled.")
 class CameraLiveConfig(FrigateBaseModel):
 stream_name: str = Field(default="", title="Name of restream to use as live view.")
 height: int = Field(default=720, title="Live camera view height")
@@ -755,9 +761,6 @@ class CameraConfig(FrigateBaseModel):
 record: RecordConfig = Field(
 default_factory=RecordConfig, title="Record configuration."
 )
-rtmp: RtmpConfig = Field(
-default_factory=RtmpConfig, title="RTMP restreaming configuration."
-)
 live: CameraLiveConfig = Field(
 default_factory=CameraLiveConfig, title="Live playback settings."
 )
@@ -802,7 +805,6 @@ class CameraConfig(FrigateBaseModel):
 # add roles to the input if there is only one
 if len(config["ffmpeg"]["inputs"]) == 1:
-has_rtmp = "rtmp" in config["ffmpeg"]["inputs"][0].get("roles", [])
 has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
 config["ffmpeg"]["inputs"][0]["roles"] = [
@@ -813,9 +815,6 @@ class CameraConfig(FrigateBaseModel):
 if has_audio:
 config["ffmpeg"]["inputs"][0]["roles"].append("audio")
-if has_rtmp:
-config["ffmpeg"]["inputs"][0]["roles"].append("rtmp")
 super().__init__(**config)
 @property
@@ -855,15 +854,7 @@ class CameraConfig(FrigateBaseModel):
 )
 ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
-if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled:
-rtmp_args = get_ffmpeg_arg_list(
-parse_preset_output_rtmp(self.ffmpeg.output_args.rtmp)
-or self.ffmpeg.output_args.rtmp
-)
-ffmpeg_output_args = (
-rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
-)
 if "record" in ffmpeg_input.roles and self.record.enabled:
 record_args = get_ffmpeg_arg_list(
 parse_preset_output_record(self.ffmpeg.output_args.record)
@@ -950,11 +941,6 @@ def verify_config_roles(camera_config: CameraConfig) -> None:
 f"Camera {camera_config.name} has record enabled, but record is not assigned to an input."
 )
-if camera_config.rtmp.enabled and "rtmp" not in assigned_roles:
-raise ValueError(
-f"Camera {camera_config.name} has rtmp enabled, but rtmp is not assigned to an input."
-)
 if camera_config.audio.enabled and "audio" not in assigned_roles:
 raise ValueError(
 f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
@@ -1065,9 +1051,6 @@ class FrigateConfig(FrigateBaseModel):
 snapshots: SnapshotsConfig = Field(
 default_factory=SnapshotsConfig, title="Global snapshots configuration."
 )
-rtmp: RtmpConfig = Field(
-default_factory=RtmpConfig, title="Global RTMP restreaming configuration."
-)
 live: CameraLiveConfig = Field(
 default_factory=CameraLiveConfig, title="Live playback settings."
 )
@@ -1114,6 +1097,10 @@ class FrigateConfig(FrigateBaseModel):
 elif config.objects.filters[attribute].min_score == 0.5:
 config.objects.filters[attribute].min_score = 0.7
+# auto detect hwaccel args
+if config.ffmpeg.hwaccel_args == "auto":
+config.ffmpeg.hwaccel_args = auto_detect_hwaccel()
 # Global config to propagate down to camera level
 global_config = config.dict(
 include={
@@ -1121,7 +1108,6 @@ class FrigateConfig(FrigateBaseModel):
 "birdseye": ...,
 "record": ...,
 "snapshots": ...,
-"rtmp": ...,
 "live": ...,
 "objects": ...,
 "motion": ...,
@@ -1138,6 +1124,9 @@ class FrigateConfig(FrigateBaseModel):
 {"name": name, **merged_config}
 )
+if camera_config.ffmpeg.hwaccel_args == "auto":
+camera_config.ffmpeg.hwaccel_args = config.ffmpeg.hwaccel_args
 if (
 camera_config.detect.height is None
 or camera_config.detect.width is None
@@ -1255,11 +1244,6 @@ class FrigateConfig(FrigateBaseModel):
 verify_zone_objects_are_tracked(camera_config)
 verify_autotrack_zones(camera_config)
-if camera_config.rtmp.enabled:
-logger.warning(
-"RTMP restream is deprecated in favor of the restream role, recommend disabling RTMP."
-)
 # generate the ffmpeg commands
 camera_config.create_ffmpeg_cmds()
 config.cameras[name] = camera_config
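`hwaccel_args` now defaults to `"auto"` and is resolved through `auto_detect_hwaccel()`, which is imported from `frigate.util.services` but whose body is not part of this commit view. A minimal sketch of what that detection could look like, assuming it simply probes for NVIDIA and VAAPI support and otherwise falls back to software decoding; the actual implementation may differ:

```python
# Sketch only: the real frigate.util.services.auto_detect_hwaccel is not shown in
# this commit view. Assumes detection via the nvidia-smi and vainfo CLIs.
import shutil
import subprocess
from typing import Union

from frigate.const import FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI


def auto_detect_hwaccel() -> Union[str, list]:
    """Return a hwaccel preset for detected hardware, or an empty arg list."""
    # Assume an NVIDIA GPU is usable if nvidia-smi exists and exits cleanly.
    if shutil.which("nvidia-smi"):
        if subprocess.run(["nvidia-smi"], capture_output=True).returncode == 0:
            return FFMPEG_HWACCEL_NVIDIA

    # Assume a VAAPI render device is usable if vainfo exists and exits cleanly.
    if shutil.which("vainfo"):
        if subprocess.run(["vainfo"], capture_output=True).returncode == 0:
            return FFMPEG_HWACCEL_VAAPI

    # Nothing detected: keep the previous behavior of no hwaccel args.
    return []
```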


@@ -35,6 +35,11 @@ AUDIO_MAX_BIT_RANGE = 32768.0
 AUDIO_SAMPLE_RATE = 16000
 AUDIO_MIN_CONFIDENCE = 0.5
+# Ffmpeg Presets
+FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
+FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
 # Regex Consts
 REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
@@ -59,6 +64,7 @@ MAX_PLAYLIST_SECONDS = 7200 # support 2 hour segments for a single playlist to
 # Internal Comms Topics
 INSERT_MANY_RECORDINGS = "insert_many_recordings"
+INSERT_PREVIEW = "insert_preview"
 REQUEST_REGION_GRID = "request_region_grid"
 # Autotracking
@@ -67,6 +73,6 @@ AUTOTRACKING_MAX_AREA_RATIO = 0.6
 AUTOTRACKING_MOTION_MIN_DISTANCE = 20
 AUTOTRACKING_MOTION_MAX_POINTS = 500
 AUTOTRACKING_MAX_MOVE_METRICS = 500
-AUTOTRACKING_ZOOM_OUT_HYSTERESIS = 1.2
+AUTOTRACKING_ZOOM_OUT_HYSTERESIS = 1.1
-AUTOTRACKING_ZOOM_IN_HYSTERESIS = 0.9
+AUTOTRACKING_ZOOM_IN_HYSTERESIS = 0.95
 AUTOTRACKING_ZOOM_EDGE_THRESHOLD = 0.05


@@ -52,7 +52,7 @@ class ExternalEventProcessor:
 (
 EventTypeEnum.api,
 "new",
-camera_config,
+camera,
 {
 "id": event_id,
 "label": label,


@@ -45,6 +45,12 @@ def should_update_state(prev_event: Event, current_event: Event) -> bool:
 if prev_event["attributes"] != current_event["attributes"]:
 return True
+if prev_event["sub_label"] != current_event["sub_label"]:
+return True
+if len(prev_event["current_zones"]) < len(current_event["current_zones"]):
+return True
 return False
@@ -103,6 +109,16 @@ class EventProcessor(threading.Thread):
 self.handle_object_detection(event_type, camera, event_data)
 elif source_type == EventTypeEnum.api:
+self.timeline_queue.put(
+(
+camera,
+source_type,
+event_type,
+{},
+event_data,
+)
+)
 self.handle_external_detection(event_type, event_data)
 # set an end_time on events without an end_time before exiting
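For illustration, two assumed event dicts trimmed to the compared fields; either of the new checks in `should_update_state` above now flags the event for a database update:

```python
# Assumed event dicts, reduced to the fields the new checks compare.
prev_event = {"attributes": {}, "sub_label": None, "current_zones": ["porch"]}
current_event = {
    "attributes": {},
    "sub_label": "ups",                       # sub label was set after the event started
    "current_zones": ["porch", "driveway"],   # object entered an additional zone
}

# prev_event["sub_label"] != current_event["sub_label"]                    -> True
# len(prev_event["current_zones"]) < len(current_event["current_zones"])  -> True
# so should_update_state(prev_event, current_event) returns True
```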


@@ -5,6 +5,7 @@ import os
 from enum import Enum
 from typing import Any
+from frigate.const import FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI
 from frigate.util.services import vainfo_hwaccel
 from frigate.version import VERSION
@@ -42,6 +43,11 @@ class LibvaGpuSelector:
 return ""
+FPS_VFR_PARAM = (
+"-fps_mode vfr"
+if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
+else "-vsync 2"
+)
 TIMEOUT_PARAM = (
 "-timeout"
 if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
@@ -57,55 +63,72 @@ _user_agent_args = [
 PRESETS_HW_ACCEL_DECODE = {
 "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
 "preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
-"preset-vaapi": f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
+FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
 "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
 "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
-"preset-nvidia-h264": "-hwaccel cuda -hwaccel_output_format cuda",
+FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
-"preset-nvidia-h265": "-hwaccel cuda -hwaccel_output_format cuda",
-"preset-nvidia-mjpeg": "-hwaccel cuda -hwaccel_output_format cuda",
 "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
 "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
 "preset-rk-h264": "-c:v h264_rkmpp_decoder",
 "preset-rk-h265": "-c:v hevc_rkmpp_decoder",
 }
+PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_DECODE[
+FFMPEG_HWACCEL_NVIDIA
+]
+PRESETS_HW_ACCEL_DECODE["preset-nvidia-h265"] = PRESETS_HW_ACCEL_DECODE[
+FFMPEG_HWACCEL_NVIDIA
+]
+PRESETS_HW_ACCEL_DECODE["preset-nvidia-mjpeg"] = PRESETS_HW_ACCEL_DECODE[
+FFMPEG_HWACCEL_NVIDIA
+]
 PRESETS_HW_ACCEL_SCALE = {
 "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
 "preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
-"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
 "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
 "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
-"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
+FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
-"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
 "preset-jetson-h264": "-r {0}", # scaled in decoder
 "preset-jetson-h265": "-r {0}", # scaled in decoder
 "preset-rk-h264": "-r {0} -vf fps={0},scale={1}:{2}",
 "preset-rk-h265": "-r {0} -vf fps={0},scale={1}:{2}",
 "default": "-r {0} -vf fps={0},scale={1}:{2}",
 }
+PRESETS_HW_ACCEL_SCALE["preset-nvidia-h264"] = PRESETS_HW_ACCEL_SCALE[
+FFMPEG_HWACCEL_NVIDIA
+]
+PRESETS_HW_ACCEL_SCALE["preset-nvidia-h265"] = PRESETS_HW_ACCEL_SCALE[
+FFMPEG_HWACCEL_NVIDIA
+]
 PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
 "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
 "preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m {1}",
-"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
+FFMPEG_HWACCEL_VAAPI: "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
 "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
 "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
-"preset-nvidia-h264": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
+FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
-"preset-nvidia-h265": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
 "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
 "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
 "preset-rk-h264": "ffmpeg -hide_banner {0} -c:v h264_rkmpp_encoder -profile high {1}",
 "preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}",
 "default": "ffmpeg -hide_banner {0} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {1}",
 }
+PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[
+"preset-nvidia-h264"
+] = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
+PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[
+"preset-nvidia-h265"
+] = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE[FFMPEG_HWACCEL_NVIDIA]
 PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
 "preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -pix_fmt yuv420p {1}",
 "preset-rpi-64-h265": "ffmpeg -hide_banner {0} -c:v hevc_v4l2m2m -pix_fmt yuv420p {1}",
-"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}",
+FFMPEG_HWACCEL_VAAPI: "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi {1}",
 "preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
 "preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
-"preset-nvidia-h264": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v h264_nvenc {1}",
+FFMPEG_HWACCEL_NVIDIA: "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v h264_nvenc {1}",
 "preset-nvidia-h265": "ffmpeg -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {0} -c:v hevc_nvenc {1}",
 "preset-jetson-h264": "ffmpeg -hide_banner {0} -c:v h264_nvmpi -profile high {1}",
 "preset-jetson-h265": "ffmpeg -hide_banner {0} -c:v hevc_nvmpi -profile high {1}",
@@ -113,6 +136,14 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
 "preset-rk-h265": "ffmpeg -hide_banner {0} -c:v hevc_rkmpp_encoder -profile high {1}",
 "default": "ffmpeg -hide_banner {0} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {1}",
 }
+PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[
+"preset-nvidia-h264"
+] = PRESETS_HW_ACCEL_ENCODE_TIMELAPSE[FFMPEG_HWACCEL_NVIDIA]
+# encoding of previews is only done on CPU due to comparable encode times and better quality from libx264
+PRESETS_HW_ACCEL_ENCODE_PREVIEW = {
+"default": "ffmpeg -hide_banner {0} -c:v libx264 -profile:v baseline -preset:v ultrafast {1}",
+}
 def parse_preset_hardware_acceleration_decode(
@@ -153,6 +184,7 @@ def parse_preset_hardware_acceleration_scale(
 class EncodeTypeEnum(str, Enum):
 birdseye = "birdseye"
+preview = "preview"
 timelapse = "timelapse"
@@ -162,6 +194,8 @@ def parse_preset_hardware_acceleration_encode(
 """Return the correct scaling preset or default preset if none is set."""
 if type == EncodeTypeEnum.birdseye:
 arg_map = PRESETS_HW_ACCEL_ENCODE_BIRDSEYE
+elif type == EncodeTypeEnum.preview:
+arg_map = PRESETS_HW_ACCEL_ENCODE_PREVIEW
 elif type == EncodeTypeEnum.timelapse:
 arg_map = PRESETS_HW_ACCEL_ENCODE_TIMELAPSE
@@ -433,28 +467,3 @@ def parse_preset_output_record(arg: Any) -> list[str]:
 return None
 return PRESETS_RECORD_OUTPUT.get(arg, None)
-PRESETS_RTMP_OUTPUT = {
-"preset-rtmp-generic": ["-c", "copy", "-f", "flv"],
-"preset-rtmp-mjpeg": ["-c:v", "libx264", "-an", "-f", "flv"],
-"preset-rtmp-jpeg": ["-c:v", "libx264", "-an", "-f", "flv"],
-"preset-rtmp-ubiquiti": [
-"-c:v",
-"copy",
-"-f",
-"flv",
-"-ar",
-"44100",
-"-c:a",
-"aac",
-],
-}
-def parse_preset_output_rtmp(arg: Any) -> list[str]:
-"""Return the correct preset if in preset format otherwise return None."""
-if not isinstance(arg, str):
-return None
-return PRESETS_RTMP_OUTPUT.get(arg, None)
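Because `FFMPEG_HWACCEL_NVIDIA` is the string `"preset-nvidia"` (see const.py above), the alias assignments simply point the legacy per-codec keys at the consolidated entry, so configs that still reference the old preset names resolve to identical ffmpeg args:

```python
# The legacy nvidia preset names resolve to the same args as the new consolidated key.
assert PRESETS_HW_ACCEL_DECODE["preset-nvidia-h264"] == PRESETS_HW_ACCEL_DECODE["preset-nvidia"]
assert PRESETS_HW_ACCEL_SCALE["preset-nvidia-h265"] == PRESETS_HW_ACCEL_SCALE["preset-nvidia"]
assert (
    PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h264"]
    == PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia"]
)
```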


@@ -8,6 +8,7 @@ import re
 import subprocess as sp
 import time
 import traceback
+from collections import defaultdict
 from datetime import datetime, timedelta, timezone
 from functools import reduce
 from pathlib import Path
@@ -15,6 +16,7 @@ from urllib.parse import unquote
 import cv2
 import numpy as np
+import pandas as pd
 import pytz
 import requests
 from flask import (
@@ -43,7 +45,7 @@ from frigate.const import (
 RECORD_DIR,
 )
 from frigate.events.external import ExternalEventProcessor
-from frigate.models import Event, Recordings, Regions, Timeline
+from frigate.models import Event, Previews, Recordings, Regions, Timeline
 from frigate.object_processing import TrackedObject
 from frigate.plus import PlusApi
 from frigate.ptz.onvif import OnvifController
@@ -389,6 +391,17 @@ def set_sub_label(id):
 new_sub_label = json.get("subLabel")
 new_score = json.get("subLabelScore")
+if new_sub_label is None:
+return make_response(
+jsonify(
+{
+"success": False,
+"message": "A sub label must be supplied",
+}
+),
+400,
+)
 if new_sub_label and len(new_sub_label) > 100:
 return make_response(
 jsonify(
@@ -414,6 +427,7 @@ def set_sub_label(id):
 )
 if not event.end_time:
+# update tracked object
 tracked_obj: TrackedObject = (
 current_app.detected_frames_processor.camera_states[
 event.camera
@@ -423,6 +437,11 @@ def set_sub_label(id):
 if tracked_obj:
 tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
+# update timeline items
+Timeline.update(
+data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
+).where(Timeline.source_id == id).execute()
 event.sub_label = new_sub_label
 if new_score:
@@ -611,6 +630,189 @@ def timeline():
 return jsonify([t for t in timeline])
@bp.route("/timeline/hourly")
def hourly_timeline():
"""Get hourly summary for timeline."""
cameras = request.args.get("cameras", "all")
labels = request.args.get("labels", "all")
before = request.args.get("before", type=float)
after = request.args.get("after", type=float)
limit = request.args.get("limit", 200)
tz_name = request.args.get("timezone", default="utc", type=str)
_, minute_modifier, _ = get_tz_modifiers(tz_name)
minute_offset = int(minute_modifier.split(" ")[0])
clauses = []
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Timeline.camera << camera_list))
if labels != "all":
label_list = labels.split(",")
clauses.append((Timeline.data["label"] << label_list))
if before:
clauses.append((Timeline.timestamp < before))
if after:
clauses.append((Timeline.timestamp > after))
if len(clauses) == 0:
clauses.append((True))
timeline = (
Timeline.select(
Timeline.camera,
Timeline.timestamp,
Timeline.data,
Timeline.class_type,
Timeline.source_id,
Timeline.source,
)
.where(reduce(operator.and_, clauses))
.order_by(Timeline.timestamp.desc())
.limit(limit)
.dicts()
.iterator()
)
count = 0
start = 0
end = 0
hours: dict[str, list[dict[str, any]]] = {}
for t in timeline:
if count == 0:
start = t["timestamp"]
else:
end = t["timestamp"]
count += 1
hour = (
datetime.fromtimestamp(t["timestamp"]).replace(
minute=0, second=0, microsecond=0
)
+ timedelta(
minutes=minute_offset,
)
).timestamp()
if hour not in hours:
hours[hour] = [t]
else:
hours[hour].insert(0, t)
return jsonify(
{
"start": start,
"end": end,
"count": count,
"hours": hours,
}
)
@bp.route("/<camera_name>/recording/hourly/activity")
def hourly_timeline_activity(camera_name: str):
"""Get hourly summary for timeline."""
if camera_name not in current_app.frigate_config.cameras:
return make_response(
jsonify({"success": False, "message": "Camera not found"}),
404,
)
before = request.args.get("before", type=float, default=datetime.now())
after = request.args.get(
"after", type=float, default=datetime.now() - timedelta(hours=1)
)
tz_name = request.args.get("timezone", default="utc", type=str)
_, minute_modifier, _ = get_tz_modifiers(tz_name)
minute_offset = int(minute_modifier.split(" ")[0])
all_recordings: list[Recordings] = (
Recordings.select(
Recordings.start_time,
Recordings.duration,
Recordings.objects,
Recordings.motion,
)
.where(Recordings.camera == camera_name)
.where(Recordings.motion > 0)
.where((Recordings.start_time > after) & (Recordings.end_time < before))
.order_by(Recordings.start_time.asc())
.iterator()
)
# data format is ex:
# {timestamp: [{ date: 1, count: 1, type: motion }]}] }}
hours: dict[int, list[dict[str, any]]] = defaultdict(list)
key = datetime.fromtimestamp(after).replace(second=0, microsecond=0) + timedelta(
minutes=minute_offset
)
check = (key + timedelta(hours=1)).timestamp()
# set initial start so data is representative of full hour
hours[int(key.timestamp())].append(
[
key.timestamp(),
0,
False,
]
)
for recording in all_recordings:
if recording.start_time > check:
hours[int(key.timestamp())].append(
[
(key + timedelta(minutes=59, seconds=59)).timestamp(),
0,
False,
]
)
key = key + timedelta(hours=1)
check = (key + timedelta(hours=1)).timestamp()
hours[int(key.timestamp())].append(
[
key.timestamp(),
0,
False,
]
)
data_type = recording.objects > 0
count = recording.motion + recording.objects
hours[int(key.timestamp())].append(
[
recording.start_time + (recording.duration / 2),
0 if count == 0 else np.log2(count),
data_type,
]
)
# resample data using pandas to get activity on minute to minute basis
for key, data in hours.items():
df = pd.DataFrame(data, columns=["date", "count", "hasObjects"])
# set date as datetime index
df["date"] = pd.to_datetime(df["date"], unit="s")
df.set_index(["date"], inplace=True)
# normalize data
df = df.resample("T").mean().fillna(0)
# change types for output
df.index = df.index.astype(int) // (10**9)
df["count"] = df["count"].astype(int)
df["hasObjects"] = df["hasObjects"].astype(bool)
hours[key] = df.reset_index().to_dict("records")
return jsonify(hours)
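The resampling step above turns the sparse per-recording samples into a minute-by-minute activity series. A standalone sketch of that pandas pattern, using made-up sample rows:

```python
# Standalone illustration of the resample pattern used above; sample rows are made up.
import pandas as pd

rows = [
    [1706976000.0, 0.0, False],  # synthetic hour-start marker
    [1706976065.0, 3.2, True],   # recording segment with detected objects
    [1706976230.0, 1.0, False],  # recording segment with motion only
]

df = pd.DataFrame(rows, columns=["date", "count", "hasObjects"])
df["date"] = pd.to_datetime(df["date"], unit="s")
df.set_index(["date"], inplace=True)

# "T" buckets by minute; minutes without samples become 0 activity.
df = df.resample("T").mean().fillna(0)

# convert back to epoch seconds and JSON-friendly types
df.index = df.index.astype(int) // (10**9)
df["count"] = df["count"].astype(int)
df["hasObjects"] = df["hasObjects"].astype(bool)
print(df.reset_index().to_dict("records"))
```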
 @bp.route("/<camera_name>/<label>/best.jpg")
 @bp.route("/<camera_name>/<label>/thumbnail.jpg")
 def label_thumbnail(camera_name, label):
@@ -1674,6 +1876,7 @@ def recordings(camera_name):
 Recordings.segment_size,
 Recordings.motion,
 Recordings.objects,
+Recordings.duration,
 )
 .where(
 Recordings.camera == camera_name,
@@ -1845,7 +2048,6 @@ def vod_hour_no_timezone(year_month, day, hour, camera_name):
 )
-# TODO make this nicer when vod module is removed
 @bp.route("/vod/<year_month>/<day>/<hour>/<camera_name>/<tz_name>")
 def vod_hour(year_month, day, hour, camera_name, tz_name):
 parts = year_month.split("-")
@@ -1860,6 +2062,110 @@ def vod_hour(year_month, day, hour, camera_name, tz_name):
 return vod_ts(camera_name, start_ts, end_ts)
@bp.route("/preview/<camera_name>/start/<int:start_ts>/end/<int:end_ts>")
@bp.route("/preview/<camera_name>/start/<float:start_ts>/end/<float:end_ts>")
def preview_ts(camera_name, start_ts, end_ts):
"""Get all mp4 previews relevant for time period."""
if camera_name != "all":
camera_clause = Previews.camera == camera_name
else:
camera_clause = True
previews = (
Previews.select(
Previews.camera,
Previews.path,
Previews.duration,
Previews.start_time,
Previews.end_time,
)
.where(
Previews.start_time.between(start_ts, end_ts)
| Previews.end_time.between(start_ts, end_ts)
| ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
)
.where(camera_clause)
.order_by(Previews.start_time.asc())
.dicts()
.iterator()
)
clips = []
preview: Previews
for preview in previews:
clips.append(
{
"camera": preview["camera"],
"src": preview["path"].replace("/media/frigate", ""),
"type": "video/mp4",
"start": preview["start_time"],
"end": preview["end_time"],
}
)
if not clips:
return make_response(
jsonify(
{
"success": False,
"message": "No previews found.",
}
),
404,
)
return make_response(jsonify(clips), 200)
@bp.route("/preview/<year_month>/<day>/<hour>/<camera_name>/<tz_name>")
def preview_hour(year_month, day, hour, camera_name, tz_name):
parts = year_month.split("-")
start_date = (
datetime(int(parts[0]), int(parts[1]), int(day), int(hour), tzinfo=timezone.utc)
- datetime.now(pytz.timezone(tz_name.replace(",", "/"))).utcoffset()
)
end_date = start_date + timedelta(hours=1) - timedelta(milliseconds=1)
start_ts = start_date.timestamp()
end_ts = end_date.timestamp()
return preview_ts(camera_name, start_ts, end_ts)
@bp.route("/preview/<camera_name>/<frame_time>/thumbnail.jpg")
def preview_thumbnail(camera_name, frame_time):
"""Get a thumbnail from the cached preview jpgs."""
preview_dir = os.path.join(CACHE_DIR, "preview_frames")
file_start = f"preview_{camera_name}"
file_check = f"{file_start}-{frame_time}.jpg"
selected_preview = None
for file in os.listdir(preview_dir):
if file.startswith(file_start):
if file < file_check:
selected_preview = file
break
if selected_preview is None:
return make_response(
jsonify(
{
"success": False,
"message": "Could not find valid preview jpg.",
}
),
404,
)
with open(os.path.join(preview_dir, selected_preview), "rb") as image_file:
jpg_bytes = image_file.read()
response = make_response(jpg_bytes)
response.headers["Content-Type"] = "image/jpeg"
response.headers["Cache-Control"] = "private, max-age=31536000"
return response
 @bp.route("/vod/event/<id>")
 def vod_event(id):
 try:


@@ -76,6 +76,15 @@ class Recordings(Model): # type: ignore[misc]
 segment_size = FloatField(default=0) # this should be stored as MB
+class Previews(Model): # type: ignore[misc]
+id = CharField(null=False, primary_key=True, max_length=30)
+camera = CharField(index=True, max_length=20)
+path = CharField(unique=True)
+start_time = DateTimeField()
+end_time = DateTimeField()
+duration = FloatField()
 # Used for temporary table in record/cleanup.py
 class RecordingsToDelete(Model): # type: ignore[misc]
 id = CharField(null=False, primary_key=False, max_length=30)
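The preview endpoints in http.py above query this table like any other peewee model; for reference, a minimal query sketch (the camera name is illustrative):

```python
# Minimal peewee usage sketch for the new Previews model; camera name is made up.
recent_previews = (
    Previews.select(Previews.path, Previews.start_time, Previews.end_time)
    .where(Previews.camera == "front_door")
    .order_by(Previews.start_time.desc())
    .limit(10)
)

for preview in recent_previews:
    print(preview.path, preview.start_time, preview.end_time)
```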


@@ -19,6 +19,7 @@ from frigate.config import (
 MqttConfig,
 RecordConfig,
 SnapshotsConfig,
+ZoomingModeEnum,
 )
 from frigate.const import CLIPS_DIR
 from frigate.events.maintainer import EventTypeEnum
@@ -512,6 +513,39 @@ class CameraState:
 thickness = 5
 color = self.config.model.colormap[obj["label"]]
# debug autotracking zooming - show the zoom factor box
if (
self.camera_config.onvif.autotracking.zooming
!= ZoomingModeEnum.disabled
):
max_target_box = self.ptz_autotracker_thread.ptz_autotracker.tracked_object_metrics[
self.name
]["max_target_box"]
side_length = max_target_box * (
max(
self.camera_config.detect.width,
self.camera_config.detect.height,
)
)
centroid_x = (obj["box"][0] + obj["box"][2]) // 2
centroid_y = (obj["box"][1] + obj["box"][3]) // 2
top_left = (
int(centroid_x - side_length // 2),
int(centroid_y - side_length // 2),
)
bottom_right = (
int(centroid_x + side_length // 2),
int(centroid_y + side_length // 2),
)
cv2.rectangle(
frame_copy,
top_left,
bottom_right,
(255, 255, 0),
2,
)
# draw the bounding boxes on the frame
box = obj["box"]
text = (

@ -1,3 +1,5 @@
"""Handle outputting birdseye frames via jsmpeg and go2rtc."""
import datetime
import glob
import logging
@ -5,23 +7,13 @@ import math
import multiprocessing as mp
import os
import queue
import signal
import subprocess as sp
import threading
import traceback
from wsgiref.simple_server import make_server
import cv2
import numpy as np
from setproctitle import setproctitle
from ws4py.server.wsgirefserver import (
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
WSGIServer,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.ws import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.types import CameraMetricsTypes
@ -672,66 +664,20 @@ class BirdsEyeFrameManager:
return False
class Birdseye:
def __init__(
self,
config: FrigateConfig,
frame_manager: SharedMemoryFrameManager,
camera_metrics: dict[str, CameraMetricsTypes],
stop_event: mp.Event,
websocket_server,
) -> None:
self.config = config
self.input = queue.Queue(maxsize=10)
self.converter = FFMpegConverter(
"birdseye",
self.input,
stop_event,
config.birdseye.width,
config.birdseye.height,
@ -740,107 +686,48 @@ def output_frames(
config.birdseye.quality,
config.birdseye.restream,
)
self.broadcaster = BroadcastThread(
"birdseye", self.converter, websocket_server, stop_event
)
self.birdseye_manager = BirdsEyeFrameManager(
config, frame_manager, stop_event, camera_metrics
)
if config.birdseye.restream:
self.birdseye_buffer = frame_manager.create(
"birdseye",
self.birdseye_manager.yuv_shape[0] * self.birdseye_manager.yuv_shape[1],
)
self.converter.start()
self.broadcaster.start()
def write_data(
self,
camera: str,
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
frame,
) -> None:
if self.birdseye_manager.update(
camera,
len([o for o in current_tracked_objects if not o["stationary"]]),
len(motion_boxes),
frame_time,
frame,
):
frame_bytes = self.birdseye_manager.frame.tobytes()
if self.config.birdseye.restream:
self.birdseye_buffer[:] = frame_bytes
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def stop(self) -> None:
self.converter.join()
self.broadcaster.join()

frigate/output/camera.py
@ -0,0 +1,165 @@
"""Handle outputting individual cameras via jsmpeg."""
import logging
import multiprocessing as mp
import queue
import subprocess as sp
import threading
from frigate.config import CameraConfig
logger = logging.getLogger(__name__)
class FFMpegConverter(threading.Thread):
def __init__(
self,
camera: str,
input_queue: queue.Queue,
stop_event: mp.Event,
in_width: int,
in_height: int,
out_width: int,
out_height: int,
quality: int,
):
threading.Thread.__init__(self)
self.name = f"{camera}_output_converter"
self.camera = camera
self.input_queue = input_queue
self.stop_event = stop_event
ffmpeg_cmd = [
"ffmpeg",
"-f",
"rawvideo",
"-pix_fmt",
"yuv420p",
"-video_size",
f"{in_width}x{in_height}",
"-i",
"pipe:",
"-f",
"mpegts",
"-s",
f"{out_width}x{out_height}",
"-codec:v",
"mpeg1video",
"-q",
f"{quality}",
"-bf",
"0",
"pipe:",
]
self.process = sp.Popen(
ffmpeg_cmd,
stdout=sp.PIPE,
stderr=sp.DEVNULL,
stdin=sp.PIPE,
start_new_session=True,
)
def __write(self, b) -> None:
self.process.stdin.write(b)
def read(self, length):
try:
return self.process.stdout.read1(length)
except ValueError:
return False
def exit(self):
self.process.terminate()
try:
self.process.communicate(timeout=30)
except sp.TimeoutExpired:
self.process.kill()
self.process.communicate()
def run(self) -> None:
while not self.stop_event.is_set():
try:
frame = self.input_queue.get(True, timeout=1)
self.__write(frame)
except queue.Empty:
pass
self.exit()
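For a hypothetical 1280x720 camera scaled to a 480x270 live view at quality 8, the command assembled above works out to roughly the following (values are made up to show the shape of the command):
ffmpeg -f rawvideo -pix_fmt yuv420p -video_size 1280x720 -i pipe: -f mpegts -s 480x270 -codec:v mpeg1video -q 8 -bf 0 pipe:
Raw yuv420p frames go in on stdin and an mpeg1 transport stream comes out on stdout for the jsmpeg websocket clients.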
class BroadcastThread(threading.Thread):
def __init__(
self,
camera: str,
converter: FFMpegConverter,
websocket_server,
stop_event: mp.Event,
):
super(BroadcastThread, self).__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
def run(self):
while not self.stop_event.is_set():
buf = self.converter.read(65536)
if buf:
manager = self.websocket_server.manager
with manager.lock:
websockets = manager.websockets.copy()
ws_iter = iter(websockets.values())
for ws in ws_iter:
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
):
try:
ws.send(buf, binary=True)
except ValueError:
pass
except (BrokenPipeError, ConnectionResetError) as e:
logger.debug(f"Websocket unexpectedly closed {e}")
elif self.converter.process.poll() is not None:
break
class JsmpegCamera:
def __init__(
self, config: CameraConfig, stop_event: mp.Event, websocket_server
) -> None:
self.config = config
self.input = queue.Queue(maxsize=config.detect.fps)
width = int(
config.live.height * (config.frame_shape[1] / config.frame_shape[0])
)
self.converter = FFMpegConverter(
config.name,
self.input,
stop_event,
config.frame_shape[1],
config.frame_shape[0],
width,
config.live.height,
config.live.quality,
)
self.broadcaster = BroadcastThread(
config.name, self.converter, websocket_server, stop_event
)
self.converter.start()
self.broadcaster.start()
def write_frame(self, frame_bytes) -> None:
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def stop(self) -> None:
self.converter.join()
self.broadcaster.join()
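A rough usage sketch for this class. The camera config, websocket server and frames are assumed to come from the surrounding output process, as shown in output.py below; this is not additional code in the change itself.
import multiprocessing as mp
stop_event = mp.Event()
# cam_config: CameraConfig, websocket_server: the ws4py WSGI server owned by the caller
jsmpeg = JsmpegCamera(cam_config, stop_event, websocket_server)
# in the frame loop: push raw yuv420p bytes; frames are dropped when the queue is full
jsmpeg.write_frame(frame.tobytes())
# on shutdown
stop_event.set()
jsmpeg.stop()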

frigate/output/output.py
@ -0,0 +1,157 @@
"""Handle outputting raw frigate frames"""
import logging
import multiprocessing as mp
import queue
import signal
import threading
from typing import Optional
from wsgiref.simple_server import make_server
from setproctitle import setproctitle
from ws4py.server.wsgirefserver import (
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
WSGIServer,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.ws import WebSocket
from frigate.config import FrigateConfig
from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder
from frigate.types import CameraMetricsTypes
from frigate.util.image import SharedMemoryFrameManager
logger = logging.getLogger(__name__)
def output_frames(
config: FrigateConfig,
video_output_queue: mp.Queue,
inter_process_queue: mp.Queue,
camera_metrics: dict[str, CameraMetricsTypes],
):
threading.current_thread().name = "output"
setproctitle("frigate.output")
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
frame_manager = SharedMemoryFrameManager()
previous_frames = {}
# start a websocket server on 8082
WebSocketWSGIHandler.http_version = "1.1"
websocket_server = make_server(
"127.0.0.1",
8082,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=WebSocket),
)
websocket_server.initialize_websockets_manager()
websocket_thread = threading.Thread(target=websocket_server.serve_forever)
jsmpeg_cameras: dict[str, JsmpegCamera] = {}
birdseye: Optional[Birdseye] = None
preview_recorders: dict[str, PreviewRecorder] = {}
for camera, cam_config in config.cameras.items():
if not cam_config.enabled:
continue
jsmpeg_cameras[camera] = JsmpegCamera(cam_config, stop_event, websocket_server)
preview_recorders[camera] = PreviewRecorder(cam_config, inter_process_queue)
if config.birdseye.enabled:
birdseye = Birdseye(
config, frame_manager, camera_metrics, stop_event, websocket_server
)
websocket_thread.start()
while not stop_event.is_set():
try:
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 1)
except queue.Empty:
continue
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
# send camera frame to ffmpeg process if websockets are connected
if any(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
):
# write to the converter for the camera if clients are listening to the specific camera
jsmpeg_cameras[camera].write_frame(frame.tobytes())
# send output data to birdseye if websocket is connected or restreaming
if config.birdseye.enabled and (
config.birdseye.restream
or any(
ws.environ["PATH_INFO"].endswith("birdseye")
for ws in websocket_server.manager
)
):
birdseye.write_data(
camera,
current_tracked_objects,
motion_boxes,
frame_time,
frame,
)
# send frames for low fps recording
preview_recorders[camera].write_data(
current_tracked_objects, motion_boxes, frame_time, frame
)
# delete frames after they have been used for output
if camera in previous_frames:
frame_manager.delete(f"{camera}{previous_frames[camera]}")
previous_frames[camera] = frame_time
while not video_output_queue.empty():
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 10)
frame_id = f"{camera}{frame_time}"
frame = frame_manager.get(frame_id, config.cameras[camera].frame_shape_yuv)
frame_manager.delete(frame_id)
for jsmpeg in jsmpeg_cameras.values():
jsmpeg.stop()
for preview in preview_recorders.values():
preview.stop()
if birdseye is not None:
birdseye.stop()
websocket_server.manager.close_all()
websocket_server.manager.stop()
websocket_server.manager.join()
websocket_server.shutdown()
websocket_thread.join()
logger.info("exiting output process...")

frigate/output/preview.py
@ -0,0 +1,264 @@
"""Handle outputting low res / fps preview segments from decoded frames."""
import datetime
import logging
import multiprocessing as mp
import os
import shutil
import subprocess as sp
import threading
from pathlib import Path
import cv2
import numpy as np
from frigate.config import CameraConfig, RecordQualityEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, INSERT_PREVIEW
from frigate.ffmpeg_presets import (
FPS_VFR_PARAM,
EncodeTypeEnum,
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Previews
from frigate.util.image import copy_yuv_to_position, get_yuv_crop
logger = logging.getLogger(__name__)
FOLDER_PREVIEW_FRAMES = "preview_frames"
PREVIEW_OUTPUT_FPS = 1
PREVIEW_SEGMENT_DURATION = 3600 # one hour
# important to have lower keyframe to maintain scrubbing performance
PREVIEW_KEYFRAME_INTERVAL = 60
PREVIEW_BIT_RATES = {
RecordQualityEnum.very_low: 4096,
RecordQualityEnum.low: 6144,
RecordQualityEnum.medium: 8192,
RecordQualityEnum.high: 12288,
RecordQualityEnum.very_high: 16384,
}
def get_cache_image_name(camera: str, frame_time: float) -> str:
"""Get the image name in cache."""
return os.path.join(
CACHE_DIR,
f"{FOLDER_PREVIEW_FRAMES}/preview_{camera}-{frame_time}.jpg",
)
class FFMpegConverter(threading.Thread):
"""Convert a list of jpg frames into a vfr mp4."""
def __init__(
self,
config: CameraConfig,
frame_times: list[float],
inter_process_queue: mp.Queue,
):
threading.Thread.__init__(self)
self.name = f"{config.name}_preview_converter"
self.config = config
self.frame_times = frame_times
self.inter_process_queue = inter_process_queue
self.path = os.path.join(
CLIPS_DIR,
f"previews/{self.config.name}/{self.frame_times[0]}-{self.frame_times[-1]}.mp4",
)
# write a PREVIEW at fps and 1 key frame per clip
self.ffmpeg_cmd = parse_preset_hardware_acceleration_encode(
config.ffmpeg.hwaccel_args,
input="-f concat -y -protocol_whitelist pipe,file -safe 0 -i /dev/stdin",
output=f"-g {PREVIEW_KEYFRAME_INTERVAL} -fpsmax {PREVIEW_OUTPUT_FPS} -bf 0 -b:v {PREVIEW_BIT_RATES[self.config.record.preview.quality]} {FPS_VFR_PARAM} -movflags +faststart -pix_fmt yuv420p {self.path}",
type=EncodeTypeEnum.preview,
)
def run(self) -> None:
# generate input list
item_count = len(self.frame_times)
playlist = []
for t_idx in range(0, item_count):
if t_idx == item_count - 1:
# last frame does not get a duration
playlist.append(
f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'"
)
continue
playlist.append(
f"file '{get_cache_image_name(self.config.name, self.frame_times[t_idx])}'"
)
playlist.append(
f"duration {self.frame_times[t_idx + 1] - self.frame_times[t_idx]}"
)
p = sp.run(
self.ffmpeg_cmd.split(" "),
input="\n".join(playlist),
encoding="ascii",
capture_output=True,
)
start = self.frame_times[0]
end = self.frame_times[-1]
if p.returncode == 0:
logger.debug("successfully saved preview")
self.inter_process_queue.put_nowait(
(
INSERT_PREVIEW,
{
Previews.id: f"{self.config.name}_{end}",
Previews.camera: self.config.name,
Previews.path: self.path,
Previews.start_time: start,
Previews.end_time: end,
Previews.duration: end - start,
},
)
)
else:
logger.error(f"Error saving preview for {self.config.name} :: {p.stderr}")
# unlink files from cache
# don't delete last frame as it will be used as first frame in next segment
for t in self.frame_times[0:-1]:
Path(get_cache_image_name(self.config.name, t)).unlink(missing_ok=True)
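For illustration, the playlist written to ffmpeg's stdin above uses the concat demuxer format: one file line per cached jpg, each followed by the gap to the next frame, with the last frame given no duration. Paths and timestamps below are made up.
file '/tmp/cache/preview_frames/preview_front_door-1706900000.0.jpg'
duration 2.5
file '/tmp/cache/preview_frames/preview_front_door-1706900002.5.jpg'
duration 3.1
file '/tmp/cache/preview_frames/preview_front_door-1706900005.6.jpg'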
class PreviewRecorder:
def __init__(self, config: CameraConfig, inter_process_queue: mp.Queue) -> None:
self.config = config
self.inter_process_queue = inter_process_queue
self.start_time = 0
self.last_output_time = 0
self.output_frames = []
self.out_height = 160
self.out_width = (
int((config.detect.width / config.detect.height) * self.out_height) // 4 * 4
)
y, u1, u2, v1, v2 = get_yuv_crop(
self.config.frame_shape_yuv,
(
0,
0,
self.config.frame_shape[1],
self.config.frame_shape[0],
),
)
self.channel_dims = {
"y": y,
"u1": u1,
"u2": u2,
"v1": v1,
"v2": v2,
}
# end segment at end of hour
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
Path(os.path.join(CACHE_DIR, "preview_frames")).mkdir(exist_ok=True)
Path(os.path.join(CLIPS_DIR, f"previews/{config.name}")).mkdir(
parents=True, exist_ok=True
)
def should_write_frame(
self,
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
) -> bool:
"""Decide if this frame should be added to PREVIEW."""
# limit output to 1 fps
if (frame_time - self.last_output_time) < 1 / PREVIEW_OUTPUT_FPS:
return False
# send frame if a non-stationary object is in a zone
if any(
(len(o["current_zones"]) > 0 and not o["stationary"])
for o in current_tracked_objects
):
self.last_output_time = frame_time
return True
if len(motion_boxes) > 0:
self.last_output_time = frame_time
return True
return False
def write_frame_to_cache(self, frame_time: float, frame) -> None:
# resize yuv frame
small_frame = np.zeros((self.out_height * 3 // 2, self.out_width), np.uint8)
copy_yuv_to_position(
small_frame,
(0, 0),
(self.out_height, self.out_width),
frame,
self.channel_dims,
cv2.INTER_AREA,
)
small_frame = cv2.cvtColor(
small_frame,
cv2.COLOR_YUV2BGR_I420,
)
_, jpg = cv2.imencode(".jpg", small_frame)
with open(
get_cache_image_name(self.config.name, frame_time),
"wb",
) as j:
j.write(jpg.tobytes())
def write_data(
self,
current_tracked_objects: list[dict[str, any]],
motion_boxes: list[list[int]],
frame_time: float,
frame,
) -> None:
# always write the first frame
if self.start_time == 0:
self.start_time = frame_time
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
return
# check if PREVIEW clip should be generated and cached frames reset
if frame_time >= self.segment_end:
# save last frame to ensure consistent duration
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
FFMpegConverter(
self.config,
self.output_frames,
self.inter_process_queue,
).start()
# reset frame cache
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
self.start_time = frame_time
self.last_output_time = frame_time
self.output_frames = []
# include first frame to ensure consistent duration
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
elif self.should_write_frame(current_tracked_objects, motion_boxes, frame_time):
self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame)
def stop(self) -> None:
try:
shutil.rmtree(os.path.join(CACHE_DIR, FOLDER_PREVIEW_FRAMES))
except FileNotFoundError:
pass

@ -527,16 +527,28 @@ class PtzAutoTracker:
return np.dot(self.move_coefficients[camera], input_data)
def _predict_area_after_time(self, camera, time):
return np.dot(
self.tracked_object_metrics[camera]["area_coefficients"],
[self.tracked_object_history[camera][-1]["frame_time"] + time],
)
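In other words, the regression fitted further down maps frame time to object area with a single least-squares coefficient and no intercept, so the prediction here is just a dot product. A standalone sketch of the same idea, with invented sample numbers:
import numpy as np
# (frame_time, area) samples for a tracked object, e.g. one per frame
times = np.array([100.0, 100.5, 101.0, 101.5])
areas = np.array([4000.0, 4400.0, 4850.0, 5300.0])
# least-squares slope mapping frame_time -> area (no intercept, as in the fit below)
coefficients = np.linalg.lstsq(times.reshape(-1, 1), areas, rcond=None)[0]
# predicted area 0.5s after the newest sample
predicted_area = np.dot(coefficients, [times[-1] + 0.5])
print(coefficients, predicted_area)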
def _calculate_tracked_object_metrics(self, camera, obj):
def remove_outliers(data):
areas = [item["area"] for item in data]
Q1 = np.percentile(areas, 25)
Q3 = np.percentile(areas, 75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
filtered_data = [
item for item in data if lower_bound <= item["area"] <= upper_bound
]
# Find and log the removed values
removed_values = [item for item in data if item not in filtered_data]
logger.debug(f"{camera}: Removed area outliers: {removed_values}")
return filtered_data
@ -548,18 +560,43 @@ class PtzAutoTracker:
# Extract areas and calculate weighted average
# grab the largest dimension of the bounding box and create a square from that
areas = [
{
"frame_time": obj["frame_time"],
"box": obj["box"],
"area": max(
obj["box"][2] - obj["box"][0], obj["box"][3] - obj["box"][1]
)
** 2,
}
for obj in self.tracked_object_history[camera]
]
filtered_areas = remove_outliers(areas) if len(areas) >= 2 else areas
# Filter entries that are not touching the frame edge
filtered_areas_not_touching_edge = [
entry
for entry in filtered_areas
if self._touching_frame_edges(camera, entry["box"]) == 0
]
# Calculate regression for area change predictions
if len(filtered_areas_not_touching_edge):
X = np.array(
[item["frame_time"] for item in filtered_areas_not_touching_edge]
)
y = np.array([item["area"] for item in filtered_areas_not_touching_edge])
self.tracked_object_metrics[camera]["area_coefficients"] = np.linalg.lstsq(
X.reshape(-1, 1), y, rcond=None
)[0]
else:
self.tracked_object_metrics[camera]["area_coefficients"] = np.array([0])
weights = np.arange(1, len(filtered_areas) + 1)
weighted_area = np.average(
[item["area"] for item in filtered_areas], weights=weights
)
self.tracked_object_metrics[camera]["target_box"] = (
weighted_area / (camera_width * camera_height)
@ -681,26 +718,28 @@ class PtzAutoTracker:
self._calculate_move_coefficients(camera)
def _enqueue_move(self, camera, frame_time, pan, tilt, zoom):
def split_value(value, suppress_diff=True):
clipped = np.clip(value, -1, 1)
# don't make small movements
if -0.05 < clipped < 0.05 and suppress_diff:
diff = 0.0
else:
diff = value - clipped
return clipped, diff
if (
frame_time > self.ptz_metrics[camera]["ptz_start_time"].value
and frame_time > self.ptz_metrics[camera]["ptz_stop_time"].value
and not self.move_queue_locks[camera].locked()
):
# we can split up any large moves caused by velocity estimated movements if necessary
# get an excess amount and assign it instead of 0 below
while pan != 0 or tilt != 0 or zoom != 0:
pan, _ = split_value(pan)
tilt, _ = split_value(tilt)
zoom, _ = split_value(zoom, False)
logger.debug(
f"{camera}: Enqueue movement for frame time: {frame_time} pan: {pan}, tilt: {tilt}, zoom: {zoom}"
@ -708,9 +747,25 @@ class PtzAutoTracker:
move_data = (frame_time, pan, tilt, zoom)
self.move_queues[camera].put(move_data)
# reset values to not split up large movements
pan = 0
tilt = 0
zoom = 0
def _touching_frame_edges(self, camera, box):
camera_config = self.config.cameras[camera]
camera_width = camera_config.frame_shape[1]
camera_height = camera_config.frame_shape[0]
bb_left, bb_top, bb_right, bb_bottom = box
edge_threshold = AUTOTRACKING_ZOOM_EDGE_THRESHOLD
return int(
(bb_left < edge_threshold * camera_width)
+ (bb_right > (1 - edge_threshold) * camera_width)
+ (bb_top < edge_threshold * camera_height)
+ (bb_bottom > (1 - edge_threshold) * camera_height)
)
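As a quick worked example of the edge count above, assuming AUTOTRACKING_ZOOM_EDGE_THRESHOLD is 0.15 (the real constant lives elsewhere in the codebase), a box hugging the top-left corner of a 1280x720 frame touches two edges:
edge_threshold = 0.15  # assumed value for illustration only
camera_width, camera_height = 1280, 720
bb_left, bb_top, bb_right, bb_bottom = 20, 10, 400, 300
touching = int(
    (bb_left < edge_threshold * camera_width)            # 20 < 192  -> True
    + (bb_right > (1 - edge_threshold) * camera_width)    # 400 > 1088 -> False
    + (bb_top < edge_threshold * camera_height)           # 10 < 108  -> True
    + (bb_bottom > (1 - edge_threshold) * camera_height)  # 300 > 612 -> False
)
print(touching)  # 2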
def _get_valid_velocity(self, camera, obj):
# returns a tuple and euclidean distance if the estimated velocity is valid
@ -722,7 +777,9 @@ class PtzAutoTracker:
# estimate_velocity is a numpy array of bbox top,left and bottom,right velocities
velocities = obj.obj_data["estimate_velocity"]
logger.debug(
f"{camera}: Velocity (Norfair): {tuple(np.round(velocities).flatten().astype(int))}"
)
# if we are close enough to zero, return right away
if np.all(np.round(velocities) == 0):
@ -827,7 +884,7 @@ class PtzAutoTracker:
return distance_threshold
def _should_zoom_in(self, camera, obj, box, predicted_time, debug_zooming=False):
# returns True if we should zoom in, False if we should zoom out, None to do nothing
camera_config = self.config.cameras[camera]
camera_width = camera_config.frame_shape[1]
@ -838,9 +895,6 @@ class PtzAutoTracker:
bb_left, bb_top, bb_right, bb_bottom = box
# TODO: Take into account the area changing when an object is moving out of frame
edge_threshold = AUTOTRACKING_ZOOM_EDGE_THRESHOLD
# calculate a velocity threshold based on movement coefficients if available
if camera_config.onvif.autotracking.movement_weights:
predicted_movement_time = self._predict_movement_time(camera, 1, 1)
@ -851,12 +905,8 @@ class PtzAutoTracker:
velocity_threshold_x = camera_width * 0.02
velocity_threshold_y = camera_height * 0.02
# return a count of the number of frame edges the bounding box is touching
touching_frame_edges = self._touching_frame_edges(camera, box)
# make sure object is centered in the frame
below_distance_threshold = self.tracked_object_metrics[camera][
@ -873,19 +923,28 @@ class PtzAutoTracker:
< np.tile([velocity_threshold_x, velocity_threshold_y], 2) < np.tile([velocity_threshold_x, velocity_threshold_y], 2)
) or np.all(average_velocity == 0) ) or np.all(average_velocity == 0)
if not predicted_time:
calculated_target_box = self.tracked_object_metrics[camera]["target_box"]
else:
calculated_target_box = self.tracked_object_metrics[camera][
"target_box"
] + self._predict_area_after_time(camera, predicted_time) / (
camera_width * camera_height
)
below_area_threshold = (
calculated_target_box
< self.tracked_object_metrics[camera]["max_target_box"]
)
# introduce some hysteresis to prevent a yo-yo zooming effect
zoom_out_hysteresis = (
calculated_target_box
> self.tracked_object_metrics[camera]["max_target_box"]
* AUTOTRACKING_ZOOM_OUT_HYSTERESIS
)
zoom_in_hysteresis = (
calculated_target_box
< self.tracked_object_metrics[camera]["max_target_box"]
* AUTOTRACKING_ZOOM_IN_HYSTERESIS
)
@ -902,13 +961,13 @@ class PtzAutoTracker:
# debug zooming
if debug_zooming:
logger.debug(
f"{camera}: Zoom test: touching edges: count: {touching_frame_edges} left: {bb_left < AUTOTRACKING_ZOOM_EDGE_THRESHOLD * camera_width}, right: {bb_right > (1 - AUTOTRACKING_ZOOM_EDGE_THRESHOLD) * camera_width}, top: {bb_top < AUTOTRACKING_ZOOM_EDGE_THRESHOLD * camera_height}, bottom: {bb_bottom > (1 - AUTOTRACKING_ZOOM_EDGE_THRESHOLD) * camera_height}"
)
logger.debug(
f"{camera}: Zoom test: below distance threshold: {(below_distance_threshold)}"
)
logger.debug(
f"{camera}: Zoom test: below area threshold: {(below_area_threshold)} target: {self.tracked_object_metrics[camera]['target_box']}, calculated: {calculated_target_box}, max: {self.tracked_object_metrics[camera]['max_target_box']}"
)
logger.debug(
f"{camera}: Zoom test: below dimension threshold: {below_dimension_threshold} width: {bb_right - bb_left}, max width: {camera_width * (self.zoom_factor[camera] + 0.1)}, height: {bb_bottom - bb_top}, max height: {camera_height * (self.zoom_factor[camera] + 0.1)}"
@ -919,10 +978,10 @@ class PtzAutoTracker:
logger.debug(f"{camera}: Zoom test: at max zoom: {at_max_zoom}") logger.debug(f"{camera}: Zoom test: at max zoom: {at_max_zoom}")
logger.debug(f"{camera}: Zoom test: at min zoom: {at_min_zoom}") logger.debug(f"{camera}: Zoom test: at min zoom: {at_min_zoom}")
logger.debug( logger.debug(
f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {self.tracked_object_metrics[camera]["target_box"]}' f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
) )
logger.debug( logger.debug(
f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {self.tracked_object_metrics[camera]["target_box"]}' f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
) )
# Zoom in conditions (and) # Zoom in conditions (and)
@ -961,6 +1020,7 @@ class PtzAutoTracker:
camera_width = camera_config.frame_shape[1]
camera_height = camera_config.frame_shape[0]
camera_fps = camera_config.detect.fps
predicted_movement_time = 0
average_velocity = np.zeros((4,))
predicted_box = obj.obj_data["box"]
@ -1010,7 +1070,9 @@ class PtzAutoTracker:
f"{camera}: Velocity: {tuple(np.round(average_velocity).flatten().astype(int))}" f"{camera}: Velocity: {tuple(np.round(average_velocity).flatten().astype(int))}"
) )
zoom = self._get_zoom_amount(camera, obj, predicted_box, debug_zoom=True) zoom = self._get_zoom_amount(
camera, obj, predicted_box, predicted_movement_time, debug_zoom=True
)
self._enqueue_move(camera, obj.obj_data["frame_time"], pan, tilt, zoom) self._enqueue_move(camera, obj.obj_data["frame_time"], pan, tilt, zoom)
@ -1018,12 +1080,14 @@ class PtzAutoTracker:
camera_config = self.config.cameras[camera]
if camera_config.onvif.autotracking.zooming != ZoomingModeEnum.disabled:
zoom = self._get_zoom_amount(camera, obj, obj.obj_data["box"], 0)
if zoom != 0:
self._enqueue_move(camera, obj.obj_data["frame_time"], 0, 0, zoom)
def _get_zoom_amount(
self, camera, obj, predicted_box, predicted_movement_time, debug_zoom=True
):
camera_config = self.config.cameras[camera]
# frame width and height
@ -1046,7 +1110,11 @@ class PtzAutoTracker:
else:
if (
result := self._should_zoom_in(
camera,
obj,
obj.obj_data["box"],
predicted_movement_time,
debug_zoom,
)
) is not None:
# divide zoom in 10 increments and always zoom out more than in
@ -1077,13 +1145,27 @@ class PtzAutoTracker:
predicted_box
if camera_config.onvif.autotracking.movement_weights
else obj.obj_data["box"],
predicted_movement_time,
debug_zoom,
)
) is not None:
if predicted_movement_time:
calculated_target_box = self.tracked_object_metrics[camera][
"target_box"
] + self._predict_area_after_time(
camera, predicted_movement_time
) / (camera_width * camera_height)
logger.debug(
f"{camera}: Zooming prediction: predicted movement time: {predicted_movement_time}, original box: {self.tracked_object_metrics[camera]['target_box']}, calculated box: {calculated_target_box}"
)
else:
calculated_target_box = self.tracked_object_metrics[camera][
"target_box"
]
# zoom value
ratio = (
self.tracked_object_metrics[camera]["max_target_box"]
/ calculated_target_box
)
zoom = (ratio - 1) / (ratio + 1)
logger.debug(
@ -1151,28 +1233,28 @@ class PtzAutoTracker:
and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"] and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"]
and obj.obj_data["frame_time"] and obj.obj_data["frame_time"]
!= self.tracked_object_history[camera][-1]["frame_time"] != self.tracked_object_history[camera][-1]["frame_time"]
and not ptz_moving_at_frame_time(
obj.obj_data["frame_time"],
self.ptz_metrics[camera]["ptz_start_time"].value,
self.ptz_metrics[camera]["ptz_stop_time"].value,
)
): ):
self.tracked_object_history[camera].append(copy.deepcopy(obj.obj_data)) self.tracked_object_history[camera].append(copy.deepcopy(obj.obj_data))
self._calculate_tracked_object_metrics(camera, obj) self._calculate_tracked_object_metrics(camera, obj)
if self.tracked_object_metrics[camera]["below_distance_threshold"]: if not ptz_moving_at_frame_time(
logger.debug( obj.obj_data["frame_time"],
f"{camera}: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}" self.ptz_metrics[camera]["ptz_start_time"].value,
) self.ptz_metrics[camera]["ptz_stop_time"].value,
):
if self.tracked_object_metrics[camera]["below_distance_threshold"]:
logger.debug(
f"{camera}: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
# no need to move, but try zooming # no need to move, but try zooming
self._autotrack_move_zoom_only(camera, obj) self._autotrack_move_zoom_only(camera, obj)
else: else:
logger.debug( logger.debug(
f"{camera}: Existing object (need to move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}" f"{camera}: Existing object (need to move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
) )
self._autotrack_move_ptz(camera, obj) self._autotrack_move_ptz(camera, obj)
return return

@ -534,28 +534,27 @@ class OnvifController:
except Exception:
pass # We're unsupported, that'll be reported in the next check.
try:
pan_tilt_status = getattr(status.MoveStatus, "PanTilt", None)
zoom_status = getattr(status.MoveStatus, "Zoom", None)
# if it's not an attribute, see if MoveStatus even exists in the status result
if pan_tilt_status is None:
pan_tilt_status = getattr(status, "MoveStatus", None)
# we're unsupported
if pan_tilt_status is None or pan_tilt_status not in [
"IDLE",
"MOVING",
]:
raise Exception
except Exception:
logger.warning(
f"Camera {camera_name} does not support the ONVIF GetStatus method. Autotracking will not function correctly and must be disabled in your config."
)
return
if pan_tilt_status == "IDLE" and (zoom_status is None or zoom_status == "IDLE"):
self.cams[camera_name]["active"] = False
if not self.ptz_metrics[camera_name]["ptz_motor_stopped"].is_set():
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set()

@ -7,9 +7,9 @@ import threading
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, RECORD_DIR
from frigate.models import Event, Previews, Recordings
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
@ -33,10 +33,152 @@ class RecordingCleanup(threading.Thread):
logger.debug("Deleting tmp clip.") logger.debug("Deleting tmp clip.")
clear_and_unlink(p) clear_and_unlink(p)
def expire_existing_camera_recordings(
self, expire_date: float, config: CameraConfig, events: Event
) -> None:
"""Delete recordings for existing camera based on retention config."""
# Get the timestamp for cutoff of retained days
# Get recordings to check for expiration
recordings: Recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.path,
Recordings.objects,
Recordings.motion,
)
.where(
Recordings.camera == config.name,
Recordings.end_time < expire_date,
)
.order_by(Recordings.start_time)
.namedtuples()
.iterator()
)
# loop over recordings and see if they overlap with any non-expired events
# TODO: expire segments based on segment stats according to config
event_start = 0
deleted_recordings = set()
kept_recordings: list[tuple[float, float]] = []
for recording in recordings:
keep = False
# Now look for a reason to keep this recording segment
for idx in range(event_start, len(events)):
event: Event = events[idx]
# if the event starts in the future, stop checking events
# and let this recording segment expire
if event.start_time > recording.end_time:
keep = False
break
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if event.end_time is None or event.end_time >= recording.start_time:
keep = True
break
# if the event ends before this recording segment starts, skip
# this event and check the next event for an overlap.
# since the events and recordings are sorted, we can skip events
# that end before the previous recording segment started on future segments
if event.end_time < recording.start_time:
event_start = idx
# Delete recordings outside of the retention window or based on the retention mode
if (
not keep
or (
config.record.events.retain.mode == RetainModeEnum.motion
and recording.motion == 0
)
or (
config.record.events.retain.mode == RetainModeEnum.active_objects
and recording.objects == 0
)
):
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
else:
kept_recordings.append((recording.start_time, recording.end_time))
# expire recordings
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_recordings_list = list(deleted_recordings)
for i in range(0, len(deleted_recordings_list), max_deletes):
Recordings.delete().where(
Recordings.id << deleted_recordings_list[i : i + max_deletes]
).execute()
previews: Previews = (
Previews.select(
Previews.id,
Previews.start_time,
Previews.end_time,
Previews.path,
)
.where(
Previews.camera == config.name,
Previews.end_time < expire_date,
)
.order_by(Previews.start_time)
.namedtuples()
.iterator()
)
# expire previews
recording_start = 0
deleted_previews = set()
for preview in previews:
keep = False
# look for a reason to keep this preview
for idx in range(recording_start, len(kept_recordings)):
start_time, end_time = kept_recordings[idx]
# if the recording starts in the future, stop checking recordings
# and let this preview expire
if start_time > preview.end_time:
keep = False
break
# if the recording ends after the preview starts, keep it
# and stop looking at recordings
if end_time >= preview.start_time:
keep = True
break
# if the recording ends before this preview starts, skip
# this recording and check the next recording for an overlap.
# since the kept recordings and previews are sorted, we can skip recordings
# that end before the current preview started
if end_time < preview.start_time:
recording_start = idx
# Delete previews without any relevant recordings
if not keep:
Path(preview.path).unlink(missing_ok=True)
deleted_previews.add(preview.id)
# expire previews
logger.debug(f"Expiring {len(deleted_previews)} previews")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_previews_list = list(deleted_previews)
for i in range(0, len(deleted_previews_list), max_deletes):
Previews.delete().where(
Previews.id << deleted_previews_list[i : i + max_deletes]
).execute()
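Both tables above are cleaned up with the same chunked-delete pattern: collect ids, then delete them in batches so a single SQL statement never lists more than 100,000 keys. A generic sketch of that pattern (the helper name and arguments are placeholders, not part of this change):
def delete_in_chunks(model, ids, chunk_size=100_000):
    """Delete rows by primary key in bounded batches; `<<` is peewee's IN operator."""
    ids = list(ids)
    for i in range(0, len(ids), chunk_size):
        model.delete().where(model.id << ids[i : i + chunk_size]).execute()
# e.g. delete_in_chunks(Previews, deleted_previews)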
def expire_recordings(self) -> None:
"""Delete recordings based on retention config."""
logger.debug("Start expire recordings.")
logger.debug("Start deleted cameras.")
# Handle deleted cameras
expire_days = self.config.record.retain.days
expire_before = (
@ -73,31 +215,12 @@ class RecordingCleanup(threading.Thread):
logger.debug("Start all cameras.") logger.debug("Start all cameras.")
for camera, config in self.config.cameras.items(): for camera, config in self.config.cameras.items():
logger.debug(f"Start camera: {camera}.") logger.debug(f"Start camera: {camera}.")
# Get the timestamp for cutoff of retained days
expire_days = config.record.retain.days
expire_date = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# Get recordings to check for expiration
recordings: Recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.path,
Recordings.objects,
Recordings.motion,
)
.where(
Recordings.camera == camera,
Recordings.end_time < expire_date,
)
.order_by(Recordings.start_time)
.namedtuples()
.iterator()
)
# Get all the events to check against
events: Event = (
Event.select(
@ -115,60 +238,7 @@ class RecordingCleanup(threading.Thread):
.namedtuples()
)
self.expire_existing_camera_recordings(expire_date, config, events)
# TODO: expire segments based on segment stats according to config
event_start = 0
deleted_recordings = set()
for recording in recordings:
keep = False
# Now look for a reason to keep this recording segment
for idx in range(event_start, len(events)):
event: Event = events[idx]
# if the event starts in the future, stop checking events
# and let this recording segment expire
if event.start_time > recording.end_time:
keep = False
break
# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if event.end_time is None or event.end_time >= recording.start_time:
keep = True
break
# if the event ends before this recording segment starts, skip
# this event and check the next event for an overlap.
# since the events and recordings are sorted, we can skip events
# that end before the previous recording segment started on future segments
if event.end_time < recording.start_time:
event_start = idx
# Delete recordings outside of the retention window or based on the retention mode
if (
not keep
or (
config.record.events.retain.mode == RetainModeEnum.motion
and recording.motion == 0
)
or (
config.record.events.retain.mode
== RetainModeEnum.active_objects
and recording.objects == 0
)
):
Path(recording.path).unlink(missing_ok=True)
deleted_recordings.add(recording.id)
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_recordings_list = list(deleted_recordings)
for i in range(0, len(deleted_recordings_list), max_deletes):
Recordings.delete().where(
Recordings.id << deleted_recordings_list[i : i + max_deletes]
).execute()
logger.debug(f"End camera: {camera}.") logger.debug(f"End camera: {camera}.")
logger.debug("End all cameras.") logger.debug("End all cameras.")

@ -2,7 +2,7 @@
import unittest
from frigate.output.birdseye import get_canvas_shape
class TestBirdseye(unittest.TestCase):

@ -653,7 +653,7 @@ class TestConfig(unittest.TestCase):
"inputs": [ "inputs": [
{ {
"path": "rtsp://10.0.0.1:554/video", "path": "rtsp://10.0.0.1:554/video",
"roles": ["detect", "rtmp"], "roles": ["detect"],
}, },
{"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]}, {"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]},
] ]
@ -930,7 +930,7 @@ class TestConfig(unittest.TestCase):
"width": 1920, "width": 1920,
"fps": 5, "fps": 5,
}, },
"rtmp": {"enabled": True}, "audio": {"enabled": True},
} }
}, },
} }
@ -1167,122 +1167,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].snapshots.height == 150 assert runtime_config.cameras["back"].snapshots.height == 150
assert runtime_config.cameras["back"].snapshots.enabled assert runtime_config.cameras["back"].snapshots.enabled
def test_global_rtmp_disabled(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].rtmp.enabled
def test_default_not_rtmp(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"rtmp": {"enabled": False},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect", "rtmp"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
"rtmp": {
"enabled": True,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_default(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{
"path": "rtsp://10.0.0.1:554/video",
"roles": ["detect"],
},
{
"path": "rtsp://10.0.0.1:554/video2",
"roles": ["record"],
},
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_jsmpeg(self):
config = {
"mqtt": {"host": "mqtt"},
@ -1428,7 +1312,6 @@ class TestConfig(unittest.TestCase):
def test_global_timestamp_style_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"rtmp": {"enabled": False},
"timestamp_style": {"position": "br", "thickness": 2}, "timestamp_style": {"position": "br", "thickness": 2},
"cameras": { "cameras": {
"back": { "back": {

@ -14,13 +14,12 @@ class TestFfmpegPresets(unittest.TestCase):
"inputs": [ "inputs": [
{ {
"path": "rtsp://10.0.0.1:554/video", "path": "rtsp://10.0.0.1:554/video",
"roles": ["detect", "rtmp"], "roles": ["detect"],
} }
], ],
"output_args": { "output_args": {
"detect": "-f rawvideo -pix_fmt yuv420p", "detect": "-f rawvideo -pix_fmt yuv420p",
"record": "-f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an", "record": "-f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an",
"rtmp": "-c copy -f flv",
},
},
"detect": {
@ -31,9 +30,6 @@ class TestFfmpegPresets(unittest.TestCase):
"record": { "record": {
"enabled": True, "enabled": True,
}, },
"rtmp": {
"enabled": True,
},
"name": "back", "name": "back",
} }
}, },
@ -157,29 +153,6 @@ class TestFfmpegPresets(unittest.TestCase):
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]) " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
) )
def test_ffmpeg_output_rtmp_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
"rtmp"
] = "preset-rtmp-jpeg"
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "preset-rtmp-jpeg" not in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
assert "-c:v libx264" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
def test_ffmpeg_output_rtmp_not_preset(self):
self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
"rtmp"
] = "-some output"
frigate_config = FrigateConfig(**self.default_ffmpeg)
frigate_config.cameras["back"].create_ffmpeg_cmds()
assert "-some output" in (
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
if __name__ == "__main__":
unittest.main(verbosity=2)

@ -28,6 +28,7 @@ class TimelineProcessor(threading.Thread):
self.config = config
self.queue = queue
self.stop_event = stop_event
self.pre_event_cache: dict[str, list[dict[str, any]]] = {}
def run(self) -> None:
while not self.stop_event.is_set():
@ -46,6 +47,32 @@ class TimelineProcessor(threading.Thread):
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
elif input_type == EventTypeEnum.api:
self.handle_api_entry(camera, event_type, event_data)
def insert_or_save(
self,
entry: dict[str, any],
prev_event_data: dict[any, any],
event_data: dict[any, any],
) -> None:
"""Insert into db or cache."""
id = entry[Timeline.source_id]
if not event_data["has_clip"] and not event_data["has_snapshot"]:
# the related event has not been saved yet, should be added to cache
if id in self.pre_event_cache.keys():
self.pre_event_cache[id].append(entry)
else:
self.pre_event_cache[id] = [entry]
else:
# the event is saved, insert to db and insert cached into db
if id in self.pre_event_cache.keys():
for e in self.pre_event_cache[id]:
Timeline.insert(e).execute()
self.pre_event_cache.pop(id)
Timeline.insert(entry).execute()
def handle_object_detection( def handle_object_detection(
self, self,
@ -53,15 +80,17 @@ class TimelineProcessor(threading.Thread):
event_type: str, event_type: str,
prev_event_data: dict[any, any], prev_event_data: dict[any, any],
event_data: dict[any, any], event_data: dict[any, any],
) -> None: ) -> bool:
"""Handle object detection.""" """Handle object detection."""
save = False
camera_config = self.config.cameras[camera] camera_config = self.config.cameras[camera]
event_id = event_data["id"]
timeline_entry = { timeline_entry = {
Timeline.timestamp: event_data["frame_time"], Timeline.timestamp: event_data["frame_time"],
Timeline.camera: camera, Timeline.camera: camera,
Timeline.source: "tracked_object", Timeline.source: "tracked_object",
Timeline.source_id: event_data["id"], Timeline.source_id: event_id,
Timeline.data: { Timeline.data: {
"box": to_relative_box( "box": to_relative_box(
camera_config.detect.width, camera_config.detect.width,
@ -69,6 +98,7 @@ class TimelineProcessor(threading.Thread):
event_data["box"], event_data["box"],
), ),
"label": event_data["label"], "label": event_data["label"],
"sub_label": event_data.get("sub_label"),
"region": to_relative_box( "region": to_relative_box(
camera_config.detect.width, camera_config.detect.width,
camera_config.detect.height, camera_config.detect.height,
@ -77,36 +107,78 @@ class TimelineProcessor(threading.Thread):
"attribute": "", "attribute": "",
}, },
} }
# update sub labels for existing entries that haven't been added yet
if (
prev_event_data != None
and prev_event_data["sub_label"] != event_data["sub_label"]
and event_id in self.pre_event_cache.keys()
):
for e in self.pre_event_cache[event_id]:
e[Timeline.data]["sub_label"] = event_data["sub_label"]
if event_type == "start": if event_type == "start":
timeline_entry[Timeline.class_type] = "visible" timeline_entry[Timeline.class_type] = "visible"
Timeline.insert(timeline_entry).execute() save = True
elif event_type == "update": elif event_type == "update":
# zones have been updated
if ( if (
prev_event_data["current_zones"] != event_data["current_zones"] len(prev_event_data["current_zones"]) < len(event_data["current_zones"])
and len(event_data["current_zones"]) > 0
and not event_data["stationary"] and not event_data["stationary"]
): ):
timeline_entry[Timeline.class_type] = "entered_zone" timeline_entry[Timeline.class_type] = "entered_zone"
timeline_entry[Timeline.data]["zones"] = event_data["current_zones"] timeline_entry[Timeline.data]["zones"] = event_data["current_zones"]
Timeline.insert(timeline_entry).execute() save = True
elif prev_event_data["stationary"] != event_data["stationary"]: elif prev_event_data["stationary"] != event_data["stationary"]:
timeline_entry[Timeline.class_type] = ( timeline_entry[Timeline.class_type] = (
"stationary" if event_data["stationary"] else "active" "stationary" if event_data["stationary"] else "active"
) )
Timeline.insert(timeline_entry).execute() save = True
elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}: elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
timeline_entry[Timeline.class_type] = "attribute" timeline_entry[Timeline.class_type] = "attribute"
timeline_entry[Timeline.data]["attribute"] = list( timeline_entry[Timeline.data]["attribute"] = list(
event_data["attributes"].keys() event_data["attributes"].keys()
)[0] )[0]
Timeline.insert(timeline_entry).execute() save = True
elif event_type == "end": elif event_type == "end":
if event_data["has_clip"] or event_data["has_snapshot"]: timeline_entry[Timeline.class_type] = "gone"
timeline_entry[Timeline.class_type] = "gone" save = True
Timeline.insert(timeline_entry).execute()
else: if save:
# if event was not saved then the timeline entries should be deleted self.insert_or_save(timeline_entry, prev_event_data, event_data)
Timeline.delete().where(
Timeline.source_id == event_data["id"] def handle_api_entry(
).execute() self,
camera: str,
event_type: str,
event_data: dict[any, any],
) -> bool:
if event_type != "new":
return False
if event_data.get("type", "api") == "audio":
timeline_entry = {
Timeline.class_type: "heard",
Timeline.timestamp: event_data["start_time"],
Timeline.camera: camera,
Timeline.source: "audio",
Timeline.source_id: event_data["id"],
Timeline.data: {
"label": event_data["label"],
"sub_label": event_data.get("sub_label"),
},
}
else:
timeline_entry = {
Timeline.class_type: "external",
Timeline.timestamp: event_data["start_time"],
Timeline.camera: camera,
Timeline.source: "api",
Timeline.source_id: event_data["id"],
Timeline.data: {
"label": event_data["label"],
"sub_label": event_data.get("sub_label"),
},
}
Timeline.insert(timeline_entry).execute()
return True

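The new pre_event_cache above holds timeline entries for events that do not yet have a clip or snapshot and flushes them once the event is saved. A minimal, self-contained sketch of that cache-then-flush pattern (plain dicts and a list stand in for the Timeline model; the names below are illustrative, not Frigate's schema):

# Sketch of the cache-then-flush pattern; stand-in structures only.
pre_event_cache: dict[str, list[dict]] = {}
db_rows: list[dict] = []  # stand-in for Timeline.insert(...).execute()

def insert_or_cache(entry: dict, event_saved: bool) -> None:
    event_id = entry["source_id"]
    if not event_saved:
        # the related event has no clip/snapshot yet, keep the entry in memory
        pre_event_cache.setdefault(event_id, []).append(entry)
        return
    # the event is saved now: flush anything cached for it, then write this entry
    for cached in pre_event_cache.pop(event_id, []):
        db_rows.append(cached)
    db_rows.append(entry)

insert_or_cache({"source_id": "abc", "class_type": "visible"}, event_saved=False)
insert_or_cache({"source_id": "abc", "class_type": "entered_zone"}, event_saved=False)
insert_or_cache({"source_id": "abc", "class_type": "gone"}, event_saved=True)
assert [r["class_type"] for r in db_rows] == ["visible", "entered_zone", "gone"]
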
View File

@ -3,7 +3,13 @@ import random
import string import string
import numpy as np import numpy as np
from norfair import Detection, Drawable, Tracker, draw_boxes from norfair import (
Detection,
Drawable,
OptimizedKalmanFilterFactory,
Tracker,
draw_boxes,
)
from norfair.drawing.drawer import Drawer from norfair.drawing.drawer import Drawer
from frigate.config import CameraConfig from frigate.config import CameraConfig
@ -82,6 +88,13 @@ class NorfairTracker(ObjectTracker):
distance_threshold=2.5, distance_threshold=2.5,
initialization_delay=self.detect_config.min_initialized, initialization_delay=self.detect_config.min_initialized,
hit_counter_max=self.detect_config.max_disappeared, hit_counter_max=self.detect_config.max_disappeared,
# use default filter factory with custom values
# R is the multiplier for the sensor measurement noise matrix, default of 4.0
# lowering R means that we trust the position of the bounding boxes more
# testing shows that the prediction was being relied on a bit too much
# TODO: could use different kalman filter values along with
# the different tracker per object class
filter_factory=OptimizedKalmanFilterFactory(R=3.4),
) )
if self.ptz_autotracker_enabled.value: if self.ptz_autotracker_enabled.value:
self.ptz_motion_estimator = PtzMotionEstimator( self.ptz_motion_estimator = PtzMotionEstimator(

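The filter_factory change above lowers the Kalman measurement-noise multiplier R so tracked boxes follow detections more closely instead of the motion prediction. A standalone norfair sketch with the same tuning (the euclidean distance and threshold here are example values, not Frigate's custom distance function):

# Illustrative norfair setup mirroring the filter tuning in the diff above.
import numpy as np
from norfair import Detection, OptimizedKalmanFilterFactory, Tracker

tracker = Tracker(
    distance_function="euclidean",
    distance_threshold=30,
    # lower R -> trust measured boxes more, rely less on the prediction
    filter_factory=OptimizedKalmanFilterFactory(R=3.4),
)
detections = [Detection(points=np.array([[120.0, 80.0]]))]
tracked_objects = tracker.update(detections=detections)
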
View File

@ -387,6 +387,7 @@ def copy_yuv_to_position(
destination_shape, destination_shape,
source_frame=None, source_frame=None,
source_channel_dim=None, source_channel_dim=None,
interpolation=cv2.INTER_LINEAR,
): ):
# get the coordinates of the channels for this position in the layout # get the coordinates of the channels for this position in the layout
y, u1, u2, v1, v2 = get_yuv_crop( y, u1, u2, v1, v2 = get_yuv_crop(
@ -435,7 +436,6 @@ def copy_yuv_to_position(
uv_y_offset = y_y_offset // 4 uv_y_offset = y_y_offset // 4
uv_x_offset = y_x_offset // 2 uv_x_offset = y_x_offset // 2
interpolation = cv2.INTER_LINEAR
# resize/copy y channel # resize/copy y channel
destination_frame[ destination_frame[
y[1] + y_y_offset : y[1] + y_y_offset + y_resize_height, y[1] + y_y_offset : y[1] + y_y_offset + y_resize_height,

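Turning interpolation into a parameter lets callers of copy_yuv_to_position choose the resize kernel instead of always using INTER_LINEAR. One possible helper a caller might use to pick it (an assumed pattern, not necessarily how Frigate's callers decide):

import cv2

def pick_interpolation(src_h, src_w, dst_h, dst_w):
    # INTER_AREA usually looks better and is cheap when shrinking,
    # INTER_LINEAR is the common choice when enlarging.
    return cv2.INTER_AREA if dst_h * dst_w < src_h * src_w else cv2.INTER_LINEAR

# e.g. copy_yuv_to_position(..., interpolation=pick_interpolation(1080, 1920, 360, 640))
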
View File

@ -13,7 +13,9 @@ from typing import Optional
import cv2 import cv2
import psutil import psutil
import py3nvml.py3nvml as nvml import py3nvml.py3nvml as nvml
import requests
from frigate.const import FFMPEG_HWACCEL_NVIDIA, FFMPEG_HWACCEL_VAAPI
from frigate.util.builtin import clean_camera_user_pass, escape_special_characters from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -371,6 +373,38 @@ def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
return sp.run(ffprobe_cmd, capture_output=True) return sp.run(ffprobe_cmd, capture_output=True)
def auto_detect_hwaccel() -> str:
"""Detect hwaccel args by default."""
try:
cuda = False
vaapi = False
resp = requests.get("http://127.0.0.1:1984/api/ffmpeg/hardware", timeout=3)
if resp.status_code == 200:
data: dict[str, list[dict[str, str]]] = resp.json()
for source in data.get("sources", []):
if "cuda" in source.get("url", "") and source.get("name") == "OK":
cuda = True
if "vaapi" in source.get("url", "") and source.get("name") == "OK":
vaapi = True
except requests.RequestException:
pass
if cuda:
logger.info("Automatically detected nvidia hwaccel for video decoding")
return FFMPEG_HWACCEL_NVIDIA
if vaapi:
logger.info("Automatically detected vaapi hwaccel for video decoding")
return FFMPEG_HWACCEL_VAAPI
logger.warning(
"Did not detect hwaccel, using a GPU for accelerated video decoding is highly recommended"
)
return ""
async def get_video_properties(url, get_duration=False) -> dict[str, any]: async def get_video_properties(url, get_duration=False) -> dict[str, any]:
async def calculate_duration(video: Optional[any]) -> float: async def calculate_duration(video: Optional[any]) -> float:
duration = None duration = None

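auto_detect_hwaccel probes go2rtc's ffmpeg hardware endpoint on 127.0.0.1:1984 and returns one of Frigate's hwaccel presets, or an empty string for software decoding. One way a config layer might fall back to it when the user has not set hwaccel_args (hypothetical wiring and an assumed module path; the actual integration may differ):

from frigate.util.services import auto_detect_hwaccel  # assumed location of the helper above

def resolve_hwaccel(configured_args):
    """Prefer user-configured hwaccel args; otherwise probe for a preset."""
    if configured_args:
        return configured_args
    return auto_detect_hwaccel()  # "" means no hwaccel was detected
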
View File

@ -0,0 +1,35 @@
"""Peewee migrations -- 021_create_previews_table.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import peewee as pw
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql(
'CREATE TABLE IF NOT EXISTS "previews" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "camera" VARCHAR(20) NOT NULL, "path" VARCHAR(255) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "duration" REAL NOT NULL)'
)
def rollback(migrator, database, fake=False, **kwargs):
pass

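For reference, a Peewee model matching the previews table created by this migration might look like the following (a sketch derived from the CREATE TABLE statement; Frigate's actual model definition may differ in details):

import peewee as pw

class Previews(pw.Model):
    id = pw.CharField(max_length=30, primary_key=True)
    camera = pw.CharField(max_length=20)
    path = pw.CharField(max_length=255)
    start_time = pw.DateTimeField()
    end_time = pw.DateTimeField()
    duration = pw.FloatField()

    class Meta:
        table_name = "previews"
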
View File

@ -262,7 +262,6 @@ def process(path, label, output, debug_path):
} }
] ]
}, },
"rtmp": {"enabled": False},
"record": {"enabled": False}, "record": {"enabled": False},
} }
}, },

25
web-old/.gitignore vendored Normal file
View File

@ -0,0 +1,25 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
.npm

Binary file not shown. (new image, 3.9 KiB)

Binary file not shown. (new image, 558 B)

Binary file not shown. (new image, 800 B)

BIN
web-old/images/favicon.ico Normal file
Binary file not shown. (new image, 15 KiB)

BIN
web-old/images/favicon.png Normal file
Binary file not shown. (new image, 12 KiB)

View File

@ -0,0 +1,46 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
width="888.000000pt" height="888.000000pt" viewBox="0 0 888.000000 888.000000"
preserveAspectRatio="xMidYMid meet">
<metadata>
Created by potrace 1.11, written by Peter Selinger 2001-2013
</metadata>
<g transform="translate(0.000000,888.000000) scale(0.100000,-0.100000)"
fill="#000000" stroke="none">
<path d="M8228 8865 c-2 -2 -25 -6 -53 -9 -38 -5 -278 -56 -425 -91 -33 -7
-381 -98 -465 -121 -49 -14 -124 -34 -165 -45 -67 -18 -485 -138 -615 -176
-50 -14 -106 -30 -135 -37 -8 -2 -35 -11 -60 -19 -25 -8 -85 -27 -135 -42 -49
-14 -101 -31 -115 -36 -14 -5 -34 -11 -45 -13 -11 -3 -65 -19 -120 -36 -55
-18 -127 -40 -160 -50 -175 -53 -247 -77 -550 -178 -364 -121 -578 -200 -820
-299 -88 -36 -214 -88 -280 -115 -66 -27 -129 -53 -140 -58 -11 -5 -67 -29
-125 -54 -342 -144 -535 -259 -579 -343 -34 -66 7 -145 156 -299 229 -238 293
-316 340 -413 38 -80 41 -152 10 -281 -57 -234 -175 -543 -281 -732 -98 -174
-172 -239 -341 -297 -116 -40 -147 -52 -210 -80 -107 -49 -179 -107 -290 -236
-51 -59 -179 -105 -365 -131 -19 -2 -48 -7 -65 -9 -16 -3 -50 -8 -75 -11 -69
-9 -130 -39 -130 -63 0 -24 31 -46 78 -56 18 -4 139 -8 270 -10 250 -4 302
-11 335 -44 19 -18 19 -23 7 -46 -19 -36 -198 -121 -490 -233 -850 -328 -914
-354 -1159 -473 -185 -90 -337 -186 -395 -249 -60 -65 -67 -107 -62 -350 3
-113 7 -216 10 -230 3 -14 7 -52 10 -85 7 -70 14 -128 21 -170 2 -16 7 -48 10
-70 3 -22 11 -64 16 -94 6 -30 12 -64 14 -75 1 -12 5 -34 9 -51 3 -16 8 -39
10 -50 12 -57 58 -258 71 -310 9 -33 18 -69 20 -79 25 -110 138 -416 216 -582
21 -47 39 -87 39 -90 0 -7 217 -438 261 -521 109 -201 293 -501 347 -564 11
-13 37 -44 56 -68 69 -82 126 -109 160 -75 26 25 14 65 -48 164 -138 218 -142
245 -138 800 2 206 4 488 5 625 1 138 -1 293 -6 345 -28 345 -28 594 -1 760
12 69 54 187 86 235 33 52 188 212 293 302 98 84 108 93 144 121 19 15 52 42
75 61 78 64 302 229 426 313 248 169 483 297 600 326 53 14 205 6 365 -17 33
-5 155 -8 270 -6 179 3 226 7 316 28 58 13 140 25 182 26 82 2 120 6 217 22
73 12 97 16 122 18 12 1 23 21 38 70 l20 68 74 -17 c81 -20 155 -30 331 -45
69 -6 132 -8 715 -20 484 -11 620 -8 729 16 85 19 131 63 98 96 -25 26 -104
34 -302 32 -373 -2 -408 -1 -471 26 -90 37 2 102 171 120 33 3 76 8 95 10 19
2 71 7 115 10 243 17 267 20 338 37 145 36 47 102 -203 137 -136 19 -262 25
-490 22 -124 -2 -362 -4 -530 -4 l-305 -1 -56 26 c-65 31 -171 109 -238 176
-52 51 -141 173 -141 191 0 6 -6 22 -14 34 -18 27 -54 165 -64 244 -12 98 -6
322 12 414 9 47 29 127 45 176 26 80 58 218 66 278 1 11 6 47 10 80 3 33 8 70
10 83 2 13 7 53 11 90 3 37 8 74 9 83 22 118 22 279 -1 464 -20 172 -20 172
70 238 108 79 426 248 666 355 25 11 77 34 115 52 92 42 443 191 570 242 55
22 109 44 120 48 24 11 130 52 390 150 199 75 449 173 500 195 17 7 118 50
225 95 237 100 333 143 490 220 229 113 348 191 337 223 -3 10 -70 20 -79 12z"/>
</g>
</svg>

(rendered preview of the SVG above, 2.9 KiB)

BIN
web-old/images/marker.png Normal file
Binary file not shown. (new image, 534 B)

Binary file not shown. (new image, 2.6 KiB)

26
web-old/index.html Normal file
View File

@ -0,0 +1,26 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" href="/images/favicon.ico" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Frigate</title>
<link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon.png" />
<link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png" />
<link rel="icon" type="image/svg+xml" href="/images/favicon.svg" />
<link rel="manifest" href="/site.webmanifest" />
<link rel="mask-icon" href="/images/favicon.svg" color="#3b82f7" />
<meta name="msapplication-TileColor" content="#3b82f7" />
<meta name="theme-color" content="#ffffff" media="(prefers-color-scheme: light)" />
<meta name="theme-color" content="#111827" media="(prefers-color-scheme: dark)" />
</head>
<body>
<div id="app" class="z-0"></div>
<div id="dialogs" class="z-0"></div>
<div id="menus" class="z-0"></div>
<div id="tooltips" class="z-0"></div>
<noscript>You need to enable JavaScript to run this app.</noscript>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

16624
web-old/package-lock.json generated Normal file

File diff suppressed because it is too large

62
web-old/package.json Normal file
View File

@ -0,0 +1,62 @@
{
"name": "frigate",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite --host",
"build": "tsc && vite build --base=/BASE_PATH/",
"lint": "eslint --ext .jsx,.js,.tsx,.ts --ignore-path .gitignore .",
"prettier:write": "prettier -u -w --ignore-path .gitignore \"*.{ts,tsx,js,jsx,css,html}\"",
"test": "vitest",
"coverage": "vitest run --coverage"
},
"dependencies": {
"@cycjimmy/jsmpeg-player": "^6.0.5",
"axios": "^1.5.0",
"copy-to-clipboard": "3.3.3",
"date-fns": "^2.30.0",
"idb-keyval": "^6.2.0",
"immer": "^10.0.1",
"monaco-yaml": "^4.0.4",
"preact": "^10.17.1",
"preact-async-route": "^2.2.1",
"preact-router": "^4.1.0",
"react": "npm:@preact/compat@^17.1.2",
"react-dom": "npm:@preact/compat@^17.1.2",
"react-use-websocket": "^3.0.0",
"strftime": "^0.10.1",
"swr": "^1.3.0",
"video.js": "^8.5.2",
"videojs-playlist": "^5.1.0",
"vite-plugin-monaco-editor": "^1.1.0"
},
"devDependencies": {
"@preact/preset-vite": "^2.5.0",
"@tailwindcss/forms": "^0.5.6",
"@testing-library/jest-dom": "^6.1.2",
"@testing-library/preact": "^3.2.3",
"@testing-library/user-event": "^14.4.3",
"@typescript-eslint/eslint-plugin": "^6.5.0",
"@typescript-eslint/parser": "^6.5.0",
"@vitest/coverage-v8": "^0.34.3",
"@vitest/ui": "^0.34.3",
"autoprefixer": "^10.4.15",
"eslint": "^8.48.0",
"eslint-config-preact": "^1.3.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-jest": "^27.2.3",
"eslint-plugin-prettier": "^5.0.0",
"eslint-plugin-vitest-globals": "^1.4.0",
"fake-indexeddb": "^4.0.1",
"jest-websocket-mock": "^2.5.0",
"jsdom": "^22.0.0",
"msw": "^1.2.1",
"postcss": "^8.4.29",
"prettier": "^3.0.3",
"tailwindcss": "^3.3.2",
"typescript": "^5.2.2",
"vite": "^4.4.9",
"vitest": "^0.34.3"
}
}

View File
(binary image, not shown; 3.1 KiB before and after)

View File
(binary image, not shown; 6.9 KiB before and after)

View File
(binary image, not shown; 1.6 KiB before and after)

Some files were not shown because too many files have changed in this diff.