Merge remote-tracking branch 'upstream/dev' into 230626-feature-config-set

This commit is contained in:
Sergey Krashevich 2023-07-06 18:18:51 +03:00
commit 1d667fc68f
No known key found for this signature in database
GPG Key ID: 625171324E7D3856
67 changed files with 2870 additions and 1649 deletions


@@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
 sudo mkdir -p /media/frigate
 sudo chown -R "$(id -u):$(id -g)" /media/frigate
+# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
+# s6 service file. For dev, where frigate is started from an interactive
+# shell, we define it in .bashrc instead.
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
 make version
 cd web


@@ -18,10 +18,13 @@ WORKDIR /rootfs
 FROM base AS nginx
 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G
 # bind /var/cache/apt to tmpfs to speed up nginx build
 RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
     --mount=type=bind,source=docker/build_nginx.sh,target=/deps/build_nginx.sh \
+    --mount=type=cache,target=/root/.ccache \
     /deps/build_nginx.sh

 FROM wget AS go2rtc
@@ -61,14 +64,16 @@ RUN mkdir /models \
 FROM wget as libusb-build
 ARG TARGETARCH
 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G
 # Build libUSB without udev. Needed for Openvino NCS2 support
 WORKDIR /opt
-RUN apt-get update && apt-get install -y unzip build-essential automake libtool
-RUN wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
+RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache
+RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
     unzip v1.0.25.zip && cd libusb-1.0.25 && \
     ./bootstrap.sh && \
-    ./configure --disable-udev --enable-shared && \
+    ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
     make -j $(nproc --all)
 RUN apt-get update && \
     apt-get install -y --no-install-recommends libusb-1.0-0-dev && \
@@ -93,7 +98,9 @@ COPY labelmap.txt .
 COPY --from=ov-converter /models/public/ssdlite_mobilenet_v2/FP16 openvino-model
 RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
     sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
+# Get Audio Model and labels
+RUN wget -qO cpu_audio_model.tflite https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1?lite-format=tflite
+COPY audio-labelmap.txt .

 FROM wget AS s6-overlay
@@ -127,7 +134,9 @@ RUN apt-get -qq update \
     libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
     libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
     # scipy dependencies
-    gcc gfortran libopenblas-dev liblapack-dev && \
+    gcc gfortran libopenblas-dev liblapack-dev \
+    # faster-fifo dependencies
+    g++ cython3 && \
     rm -rf /var/lib/apt/lists/*
 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \

audio-labelmap.txt (new file, 521 lines)

@@ -0,0 +1,521 @@
speech
speech
speech
speech
babbling
speech
yell
bellow
whoop
yell
yell
yell
whispering
laughter
laughter
laughter
snicker
laughter
laughter
crying
crying
crying
yell
sigh
singing
choir
sodeling
chant
mantra
child_singing
synthetic_singing
rapping
humming
groan
grunt
whistling
breathing
wheeze
snoring
gasp
pant
snort
cough
throat_clearing
sneeze
sniff
run
shuffle
footsteps
chewing
biting
gargling
stomach_rumble
burping
hiccup
fart
hands
finger_snapping
clapping
heartbeat
heart_murmur
cheering
applause
chatter
crowd
speech
children_playing
animal
pets
dog
bark
yip
howl
bow-wow
growling
whimper_dog
cat
purr
meow
hiss
caterwaul
livestock
horse
clip-clop
neigh
cattle
moo
cowbell
pig
oink
goat
bleat
sheep
fowl
chicken
cluck
cock-a-doodle-doo
turkey
gobble
duck
quack
goose
honk
wild_animals
roaring_cats
roar
bird
chird
chirp
squawk
pigeon
coo
crow
caw
owl
hoot
flapping_wings
dogs
rats
mouse
patter
insect
cricket
mosquito
fly
buzz
buzz
frog
croak
snake
rattle
whale_vocalization
music
musical_instrument
plucked_string_instrument
guitar
electric_guitar
bass_guitar
acoustic_guitar
steel_guitar
tapping
strum
banjo
sitar
mandolin
zither
ukulele
keyboard
piano
electric_piano
organ
electronic_organ
hammond_organ
synthesizer
sampler
harpsichord
percussion
drum_kit
drum_machine
drum
snare_drum
rimshot
drum_roll
bass_drum
timpani
tabla
cymbal
hi-hat
wood_block
tambourine
rattle
maraca
gong
tubular_bells
mallet_percussion
marimba
glockenspiel
vibraphone
steelpan
orchestra
brass_instrument
french_horn
trumpet
trombone
bowed_string_instrument
string_section
violin
pizzicato
cello
double_bass
wind_instrument
flute
saxophone
clarinet
harp
bell
church_bell
jingle_bell
bicycle_bell
tuning_fork
chime
wind_chime
change_ringing
harmonica
accordion
bagpipes
didgeridoo
shofar
theremin
singing_bowl
scratching
pop_music
hip_hop_music
beatboxing
rock_music
heavy_metal
punk_rock
grunge
progressive_rock
rock_and_roll
psychedelic_rock
rhythm_and_blues
soul_music
reggae
country
swing_music
bluegrass
funk
folk_music
middle_eastern_music
jazz
disco
classical_music
opera
electronic_music
house_music
techno
dubstep
drum_and_bass
electronica
electronic_dance_music
ambient_music
trance_music
music_of_latin_america
salsa_music
flamenco
blues
music_for_children
new-age_music
vocal_music
a_capella
music_of_africa
afrobeat
christian_music
gospel_music
music_of_asia
carnatic_music
music_of_bollywood
ska
traditional_music
independent_music
song
background_music
theme_music
jingle
soundtrack_music
lullaby
video_game_music
christmas_music
dance_music
wedding_music
happy_music
sad_music
tender_music
exciting_music
angry_music
scary_music
wind
rustling_leaves
wind_noise
thunderstorm
thunder
water
rain
raindrop
rain_on_surface
stream
waterfall
ocean
waves
steam
gurgling
fire
crackle
vehicle
boat
sailboat
rowboat
motorboat
ship
motor_vehicle
car
honk
toot
car_alarm
power_windows
skidding
tire_squeal
car_passing_by
race_car
truck
air_brake
air_horn
reversing_beeps
ice_cream_truck
bus
emergency_vehicle
police_car
ambulance
fire_engine
motorcycle
traffic_noise
rail_transport
train
train_whistle
train_horn
railroad_car
train_wheels_squealing
subway
aircraft
aircraft_engine
jet_engine
propeller
helicopter
fixed-wing_aircraft
bicycle
skateboard
engine
light_engine
dental_drill's_drill
lawn_mower
chainsaw
medium_engine
heavy_engine
engine_knocking
engine_starting
idling
accelerating
door
doorbell
ding-dong
sliding_door
slam
knock
tap
squeak
cupboard_open_or_close
drawer_open_or_close
dishes
cutlery
chopping
frying
microwave_oven
blender
water_tap
sink
bathtub
hair_dryer
toilet_flush
toothbrush
electric_toothbrush
vacuum_cleaner
zipper
keys_jangling
coin
scissors
electric_shaver
shuffling_cards
typing
typewriter
computer_keyboard
writing
alarm
telephone
telephone_bell_ringing
ringtone
telephone_dialing
dial_tone
busy_signal
alarm_clock
siren
civil_defense_siren
buzzer
smoke_detector
fire_alarm
foghorn
whistle
steam_whistle
mechanisms
ratchet
clock
tick
tick-tock
gears
pulleys
sewing_machine
mechanical_fan
air_conditioning
cash_register
printer
camera
single-lens_reflex_camera
tools
hammer
jackhammer
sawing
filing
sanding
power_tool
drill
explosion
gunshot
machine_gun
fusillade
artillery_fire
cap_gun
fireworks
firecracker
burst
eruption
boom
wood
chop
splinter
crack
glass
chink
shatter
liquid
splash
slosh
squish
drip
pour
trickle
gush
fill
spray
pump
stir
boiling
sonar
arrow
whoosh
thump
thunk
electronic_tuner
effects_unit
chorus_effect
basketball_bounce
bang
slap
whack
smash
breaking
bouncing
whip
flap
scratch
scrape
rub
roll
crushing
crumpling
tearing
beep
ping
ding
clang
squeal
creak
rustle
whir
clatter
sizzle
clicking
clickety-clack
rumble
plop
jingle
hum
zing
boing
crunch
silence
sine_wave
harmonic
chirp_tone
sound_effect
pulse
inside
inside
inside
outside
outside
reverberation
echo
noise
environmental_noise
static
mains_hum
distortion
sidetone
cacophony
white_noise
pink_noise
throbbing
vibration
television
radio
field_recording
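
Line order matters in the list above: line N names the label emitted for YAMNet class id N, and many class ids are deliberately collapsed into one repeated name (e.g. `speech`, `yell`). A small sketch, not part of the commit, that counts those collapses from a repo checkout:

```python
# Count how many YAMNet class ids map to each Frigate audio label;
# the relative path assumes this runs from the repository root.
from collections import Counter

with open("audio-labelmap.txt") as f:
    counts = Counter(line.strip() for line in f)

print(counts.most_common(5))  # "speech" and "yell" each cover several class ids
```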


@@ -12,16 +12,32 @@ from frigate.util import create_mask
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
 # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
-cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
+cap = cv2.VideoCapture("debug/motion_test_clips/lawn_mower_night_1.mp4")
 # cap = cv2.VideoCapture("airport.mp4")
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)
 frame_shape = (height, width, 3)

+# Nick back:
+# "1280,0,1280,316,1170,216,1146,126,1016,127,979,82,839,0",
+# "310,350,300,402,224,405,241,354",
+# "378,0,375,26,0,23,0,0",
+# Front door:
+# "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+# "336,833,438,1024,346,1093,103,1052,24,814",
+# Back
+# "1855,0,1851,100,1289,96,1105,161,1045,119,890,121,890,0",
+# "505,95,506,138,388,153,384,114",
+# "689,72,689,122,549,134,547,89",
+# "261,134,264,176,169,195,167,158",
+# "145,159,146,202,70,220,65,183",
 mask = create_mask(
     (height, width),
-    [],
+    [
+        "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+        "336,833,438,1024,346,1093,103,1052,24,814",
+    ],
 )

 # create the motion config
@@ -29,7 +45,7 @@ motion_config_1 = MotionConfig()
 motion_config_1.mask = np.zeros((height, width), np.uint8)
 motion_config_1.mask[:] = mask
 # motion_config_1.improve_contrast = 1
-# motion_config_1.frame_height = 150
+motion_config_1.frame_height = 150
 # motion_config_1.frame_alpha = 0.02
 # motion_config_1.threshold = 30
 # motion_config_1.contour_area = 10
@@ -38,10 +54,11 @@ motion_config_2 = MotionConfig()
 motion_config_2.mask = np.zeros((height, width), np.uint8)
 motion_config_2.mask[:] = mask
 # motion_config_2.improve_contrast = 1
-# motion_config_2.frame_height = 150
+motion_config_2.frame_height = 150
 # motion_config_2.frame_alpha = 0.01
-# motion_config_2.threshold = 20
+motion_config_2.threshold = 20
 # motion_config.contour_area = 10

 save_images = True

 improved_motion_detector_1 = ImprovedMotionDetector(
@@ -52,8 +69,6 @@ improved_motion_detector_1 = ImprovedMotionDetector(
     threshold=mp.Value("i", motion_config_1.threshold),
     contour_area=mp.Value("i", motion_config_1.contour_area),
     name="default",
-    clipLimit=2.0,
-    tileGridSize=(8, 8),
 )
 improved_motion_detector_1.save_images = save_images


@@ -15,6 +15,10 @@ apt-get -yqq build-dep nginx
 apt-get -yqq install --no-install-recommends ca-certificates wget
 update-ca-certificates -f

+apt install -y ccache
+export PATH="/usr/lib/ccache:$PATH"
+
 mkdir /tmp/nginx
 wget -nv https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz
 tar -zxf nginx-${NGINX_VERSION}.tar.gz -C /tmp/nginx --strip-components=1

@@ -62,5 +66,5 @@ cd /tmp/nginx
     --add-module=../nginx-rtmp-module \
     --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"

-make -j$(nproc) && make install
+make CC="ccache gcc" -j$(nproc) && make install
 rm -rf /usr/local/nginx/html /usr/local/nginx/conf/*.default


@@ -44,6 +44,7 @@ function migrate_db_path() {
 echo "[INFO] Preparing Frigate..."
 migrate_db_path
+export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')

 echo "[INFO] Starting Frigate..."


@@ -43,6 +43,8 @@ function get_ip_and_port_from_supervisor() {
     export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
 }

+export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
+
 if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Preparing go2rtc config..."


@@ -7,7 +7,7 @@ import sys
 import yaml

 sys.path.insert(0, "/opt/frigate")
-from frigate.const import BIRDSEYE_PIPE, BTBN_PATH  # noqa: E402
+from frigate.const import BIRDSEYE_PIPE  # noqa: E402
 from frigate.ffmpeg_presets import (  # noqa: E402
     parse_preset_hardware_acceleration_encode,
 )

@@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None:
     go2rtc_config["rtsp"]["default_query"] = "mp4"

 # need to replace ffmpeg command when using ffmpeg4
-if not os.path.exists(BTBN_PATH):
+if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
     if go2rtc_config.get("ffmpeg") is None:
         go2rtc_config["ffmpeg"] = {
             "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"


@@ -0,0 +1,63 @@
---
id: audio_detectors
title: Audio Detectors
---

Frigate provides a builtin audio detector which runs on the CPU. Compared to object detection in images, audio detection is a relatively lightweight operation, so the only option is to run the detection on the CPU.

## Configuration

Audio events work by detecting a type of audio and creating an event; the event ends once the type of audio has not been heard for the configured amount of time. Audio events save a snapshot at the beginning of the event as well as recordings throughout the event. The recordings are retained using the configured recording retention.

### Enabling Audio Events

Audio events can be enabled for all cameras or only for specific cameras.

```yaml
audio: # <- enable audio events for all cameras
  enabled: True

cameras:
  front_camera:
    ffmpeg:
      ...
    audio:
      enabled: True # <- enable audio events for the front_camera
```

If you are using multiple streams, then you must set the `audio` role on the stream that is going to be used for audio detection. This can be any stream, but the stream must have audio included.

:::note

The ffmpeg process for capturing audio will be a separate connection to the camera along with the other roles assigned to the camera; for this reason it is recommended that the go2rtc restream is used for this purpose. See [the restream docs](/configuration/restream.md) for more information.

:::

```yaml
cameras:
  front_camera:
    ffmpeg:
      inputs:
        - path: rtsp://.../main_stream
          roles:
            - record
        - path: rtsp://.../sub_stream # <- this stream must have audio enabled
          roles:
            - audio
            - detect
```

### Configuring Audio Events

The included audio model has over 500 different types of audio that can be detected, many of which are not practical. By default `bark`, `speech`, `yell`, and `scream` are enabled, but these can be customized.

```yaml
audio:
  enabled: True
  listen:
    - bark
    - scream
    - speech
    - yell
```
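
Since `listen` entries must match labelmap names exactly, a quick sanity check can help before restarting. A sketch (not shipped with Frigate) that assumes the `/audio-labelmap.txt` path baked into the image by the Dockerfile above:

```python
# Verify that every configured `listen` entry is a label the model can emit.
wanted = {"bark", "scream", "speech", "yell"}

with open("/audio-labelmap.txt") as f:
    available = {line.strip() for line in f}

unknown = wanted - available
print("unknown labels:", sorted(unknown) or "none")
```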


@@ -138,6 +138,20 @@ model:
   labelmap:
     2: vehicle

+# Optional: Audio Events Configuration
+# NOTE: Can be overridden at the camera level
+audio:
+  # Optional: Enable audio events (default: shown below)
+  enabled: False
+  # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
+  max_not_heard: 30
+  # Optional: Types of audio to listen for (default: shown below)
+  listen:
+    - bark
+    - scream
+    - speech
+    - yell
+
 # Optional: logger verbosity settings
 logger:
   # Optional: Default log verbosity (default: shown below)

@@ -189,6 +203,11 @@ ffmpeg:
     record: preset-record-generic
     # Optional: output args for rtmp streams (default: shown below)
     rtmp: preset-rtmp-generic
+  # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
+  # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras allow at once.
+  # If set too high, an ffmpeg crash or camera stream timeout could mean losing up to retry_interval seconds of footage.
+  # NOTE: this can be a useful setting for wireless / battery cameras to reduce how much footage is potentially lost during a connection timeout.
+  retry_interval: 10

 # Optional: Detect configuration
 # NOTE: Can be overridden at the camera level

@@ -275,7 +294,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 20
+  threshold: 30
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.

@@ -448,10 +467,11 @@ cameras:
         # Required: the path to the stream
         # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
         - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-          # Required: list of roles for this stream. valid values are: detect,record,rtmp
-          # NOTICE: In addition to assigning the record and rtmp roles,
+          # Required: list of roles for this stream. valid values are: audio,detect,record,rtmp
+          # NOTICE: In addition to assigning the audio, record, and rtmp roles,
           # they must also be enabled in the camera config.
           roles:
+            - audio
             - detect
             - record
             - rtmp


@@ -1,6 +1,6 @@
 ---
-id: detectors
-title: Detectors
+id: object_detectors
+title: Object Detectors
 ---

 Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.


@@ -67,6 +67,7 @@ cameras:
       roles:
         - record
         - detect
+        - audio # <- only necessary if audio detection is enabled
   http_cam:
     ffmpeg:
       output_args:

@@ -77,6 +78,7 @@ cameras:
       roles:
         - record
         - detect
+        - audio # <- only necessary if audio detection is enabled
 ```

 ### With Sub Stream

@@ -112,6 +114,7 @@ cameras:
       - path: rtsp://127.0.0.1:8554/rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream
         input_args: preset-rtsp-restream
         roles:
+          - audio # <- only necessary if audio detection is enabled
           - detect
   http_cam:
     ffmpeg:

@@ -125,6 +128,7 @@ cameras:
       - path: rtsp://127.0.0.1:8554/http_cam_sub # <--- the name here must match the name of the camera_sub in restream
         input_args: preset-rtsp-restream
         roles:
+          - audio # <- only necessary if audio detection is enabled
           - detect
 ```


@@ -50,7 +50,7 @@ The OpenVINO detector type is able to run on:
 - 6th Gen Intel Platforms and newer that have an iGPU
 - x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2)

-More information is available [in the detector docs](/configuration/detectors#openvino-detector)
+More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)

 Inference speeds vary greatly depending on the CPU, GPU, or VPU used; some known examples are below:

@@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known
 ### TensorRT

-The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also, the GPU must support a Compute Capability of `5.0` or greater; this generally correlates to a Maxwell-era GPU or newer. Check the [TensorRT docs for more info](/configuration/detectors#nvidia-tensorrt-detector).
+The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also, the GPU must support a Compute Capability of `5.0` or greater; this generally correlates to a Maxwell-era GPU or newer. Check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).

 Inference speeds will vary greatly depending on the GPU and the model used.
 `tiny` variants are faster than the equivalent non-tiny model, some known examples are below:


@@ -71,7 +71,7 @@ cameras:
       ...
 ```

-More details on available detectors can be found [here](../configuration/detectors.md).
+More details on available detectors can be found [here](../configuration/object_detectors.md).

 Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/index.md#full-configuration-reference).


@@ -109,11 +109,19 @@ Same data available at `/api/stats` published at a configurable interval.

 ### `frigate/<camera_name>/detect/set`

-Topic to turn detection for a camera on and off. Expected values are `ON` and `OFF`.
+Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`.

 ### `frigate/<camera_name>/detect/state`

-Topic with current state of detection for a camera. Published values are `ON` and `OFF`.
+Topic with current state of object detection for a camera. Published values are `ON` and `OFF`.

+### `frigate/<camera_name>/audio/set`
+
+Topic to turn audio detection for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/audio/state`
+
+Topic with current state of audio detection for a camera. Published values are `ON` and `OFF`.
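
A hedged sketch of driving the new topics with paho-mqtt; the broker address, the default `frigate` topic prefix, and the camera name `front_camera` are all assumptions for the example:

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, message):
    # The retained state topic reflects the change, e.g.
    # "frigate/front_camera/audio/state b'ON'"
    print(message.topic, message.payload)

client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt.local", 1883)
client.subscribe("frigate/front_camera/audio/state")
client.publish("frigate/front_camera/audio/set", "ON")
client.loop_forever()
```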
### `frigate/<camera_name>/recordings/set`
@@ -176,7 +184,7 @@ Topic to send PTZ commands to camera.

 | Command                | Description                                                                                |
 | ---------------------- | ------------------------------------------------------------------------------------------ |
-| `preset-<preset_name>` | send command to move to preset with name `<preset_name>`                                   |
+| `preset_<preset_name>` | send command to move to preset with name `<preset_name>`                                   |
 | `MOVE_<dir>`           | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT]  |
 | `ZOOM_<dir>`           | send command to continuously zoom `<dir>`, possible values are [IN, OUT]                   |
 | `STOP`                 | send command to stop moving                                                                |


@@ -16,7 +16,8 @@ module.exports = {
     ],
     Configuration: [
       "configuration/index",
-      "configuration/detectors",
+      "configuration/object_detectors",
+      "configuration/audio_detectors",
       "configuration/cameras",
       "configuration/masks",
       "configuration/record",


@@ -6,12 +6,13 @@ import shutil
 import signal
 import sys
 import traceback
-from multiprocessing.queues import Queue
 from multiprocessing.synchronize import Event as MpEvent
 from types import FrameType
 from typing import Optional

+import faster_fifo as ff
 import psutil
+from faster_fifo import Queue
 from peewee_migrate import Router
 from playhouse.sqlite_ext import SqliteExtDatabase
 from playhouse.sqliteq import SqliteQueueDatabase
@@ -29,6 +30,7 @@ from frigate.const import (
     MODEL_CACHE_DIR,
     RECORD_DIR,
 )
+from frigate.events.audio import listen_to_audio
 from frigate.events.cleanup import EventCleanup
 from frigate.events.external import ExternalEventProcessor
 from frigate.events.maintainer import EventProcessor
@@ -44,7 +46,8 @@ from frigate.record.record import manage_recordings
 from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
-from frigate.types import CameraMetricsTypes, RecordMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
+from frigate.util.builtin import LimitedQueue as LQueue
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog
@@ -55,14 +58,14 @@ logger = logging.getLogger(__name__)
 class FrigateApp:
     def __init__(self) -> None:
         self.stop_event: MpEvent = mp.Event()
-        self.detection_queue: Queue = mp.Queue()
+        self.detection_queue: Queue = ff.Queue()
         self.detectors: dict[str, ObjectDetectProcess] = {}
         self.detection_out_events: dict[str, MpEvent] = {}
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
-        self.log_queue: Queue = mp.Queue()
+        self.log_queue: Queue = ff.Queue()
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
-        self.record_metrics: dict[str, RecordMetricsTypes] = {}
+        self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
         self.processes: dict[str, int] = {}

     def set_environment_vars(self) -> None:
@@ -104,37 +107,74 @@ class FrigateApp:
         user_config = FrigateConfig.parse_file(config_file)
         self.config = user_config.runtime_config(self.plus_api)

-        for camera_name in self.config.cameras.keys():
+        for camera_name, camera_config in self.config.cameras.items():
             # create camera_metrics
             self.camera_metrics[camera_name] = {
-                "camera_fps": mp.Value("d", 0.0),
-                "skipped_fps": mp.Value("d", 0.0),
-                "process_fps": mp.Value("d", 0.0),
-                "detection_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].detect.enabled
+                "camera_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "skipped_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "process_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "detection_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].detect.enabled,
                 ),
-                "motion_enabled": mp.Value("i", True),
-                "improve_contrast_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.improve_contrast
+                "motion_enabled": mp.Value("i", True),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "improve_contrast_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].motion.improve_contrast,
                 ),
-                "motion_threshold": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.threshold
+                "motion_threshold": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].motion.threshold,
                 ),
-                "motion_contour_area": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.contour_area
+                "motion_contour_area": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].motion.contour_area,
                 ),
-                "detection_fps": mp.Value("d", 0.0),
-                "detection_frame": mp.Value("d", 0.0),
-                "read_start": mp.Value("d", 0.0),
-                "ffmpeg_pid": mp.Value("i", 0),
-                "frame_queue": mp.Queue(maxsize=2),
+                "detection_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "detection_frame": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "read_start": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "ffmpeg_pid": mp.Value("i", 0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "frame_queue": LQueue(maxsize=2),
                 "capture_process": None,
                 "process": None,
             }
-            self.record_metrics[camera_name] = {
-                "record_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].record.enabled
-                )
+            self.feature_metrics[camera_name] = {
+                "audio_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].audio.enabled,
+                ),
+                "record_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].record.enabled,
+                ),
             }

     def set_log_levels(self) -> None:
@@ -150,22 +190,22 @@ class FrigateApp:
     def init_queues(self) -> None:
         # Queues for clip processing
-        self.event_queue: Queue = mp.Queue()
-        self.event_processed_queue: Queue = mp.Queue()
+        self.event_queue: Queue = ff.Queue()
+        self.event_processed_queue: Queue = ff.Queue()

-        self.video_output_queue: Queue = mp.Queue(
+        self.video_output_queue: Queue = LQueue(
             maxsize=len(self.config.cameras.keys()) * 2
         )

         # Queue for cameras to push tracked objects to
-        self.detected_frames_queue: Queue = mp.Queue(
+        self.detected_frames_queue: Queue = LQueue(
             maxsize=len(self.config.cameras.keys()) * 2
         )

         # Queue for recordings info
-        self.recordings_info_queue: Queue = mp.Queue()
+        self.recordings_info_queue: Queue = ff.Queue()

         # Queue for timeline events
-        self.timeline_queue: Queue = mp.Queue()
+        self.timeline_queue: Queue = ff.Queue()

     def init_database(self) -> None:
         def vacuum_db(db: SqliteExtDatabase) -> None:
@@ -222,7 +262,7 @@ class FrigateApp:
         recording_process = mp.Process(
             target=manage_recordings,
             name="recording_manager",
-            args=(self.config, self.recordings_info_queue, self.record_metrics),
+            args=(self.config, self.recordings_info_queue, self.feature_metrics),
         )
         recording_process.daemon = True
         self.recording_process = recording_process
@@ -281,7 +321,7 @@ class FrigateApp:
             self.config,
             self.onvif_controller,
             self.camera_metrics,
-            self.record_metrics,
+            self.feature_metrics,
             comms,
         )
@@ -390,6 +430,18 @@ class FrigateApp:
             capture_process.start()
             logger.info(f"Capture process started for {name}: {capture_process.pid}")

+    def start_audio_processors(self) -> None:
+        if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0:
+            audio_process = mp.Process(
+                target=listen_to_audio,
+                name="audio_capture",
+                args=(self.config, self.feature_metrics),
+            )
+            audio_process.daemon = True
+            audio_process.start()
+            self.processes["audioDetector"] = audio_process.pid or 0
+            logger.info(f"Audio process started: {audio_process.pid}")
+
     def start_timeline_processor(self) -> None:
         self.timeline_processor = TimelineProcessor(
             self.config, self.timeline_queue, self.stop_event
@@ -486,6 +538,7 @@ class FrigateApp:
         self.start_detected_frames_processor()
         self.start_camera_processors()
         self.start_camera_capture_processes()
+        self.start_audio_processors()
        self.start_storage_maintainer()
         self.init_stats()
         self.init_external_event_processor()
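
The queue swap above replaces `mp.Queue` with `faster_fifo.Queue` (and Frigate's own `LimitedQueue` wrapper where a count cap is wanted). A small sketch of the drop-in usage, assuming the faster-fifo package; note that, unlike `mp.Queue`, faster_fifo bounds queues by total byte size rather than item count:

```python
# faster_fifo keeps the mp.Queue-style put/get API but is shared-memory backed.
from queue import Empty  # faster_fifo raises the standard queue exceptions

import faster_fifo as ff

q = ff.Queue(max_size_bytes=1_000_000)  # ~1 MB of pickled items
q.put({"camera": "front_camera", "motion": True})

try:
    item = q.get(timeout=1.0)
except Empty:
    item = None
print(item)
```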


@@ -6,8 +6,8 @@ from typing import Any, Callable

 from frigate.config import FrigateConfig
 from frigate.ptz import OnvifCommandEnum, OnvifController
-from frigate.types import CameraMetricsTypes, RecordMetricsTypes
-from frigate.util import restart_frigate
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
+from frigate.util.services import restart_frigate

 logger = logging.getLogger(__name__)
@@ -39,19 +39,20 @@ class Dispatcher:
         config: FrigateConfig,
         onvif: OnvifController,
         camera_metrics: dict[str, CameraMetricsTypes],
-        record_metrics: dict[str, RecordMetricsTypes],
+        feature_metrics: dict[str, FeatureMetricsTypes],
         communicators: list[Communicator],
     ) -> None:
         self.config = config
         self.onvif = onvif
         self.camera_metrics = camera_metrics
-        self.record_metrics = record_metrics
+        self.feature_metrics = feature_metrics
         self.comms = communicators

         for comm in self.comms:
             comm.subscribe(self._receive)

         self._camera_settings_handlers: dict[str, Callable] = {
+            "audio": self._on_audio_command,
             "detect": self._on_detect_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
             "motion": self._on_motion_command,
@@ -186,6 +187,29 @@ class Dispatcher:
         motion_settings.threshold = payload  # type: ignore[union-attr]
         self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)

+    def _on_audio_command(self, camera_name: str, payload: str) -> None:
+        """Callback for audio topic."""
+        audio_settings = self.config.cameras[camera_name].audio
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].audio.enabled_in_config:
+                logger.error(
+                    "Audio detection must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not audio_settings.enabled:
+                logger.info(f"Turning on audio detection for {camera_name}")
+                audio_settings.enabled = True
+                self.feature_metrics[camera_name]["audio_enabled"].value = True
+        elif payload == "OFF":
+            if self.feature_metrics[camera_name]["audio_enabled"].value:
+                logger.info(f"Turning off audio detection for {camera_name}")
+                audio_settings.enabled = False
+                self.feature_metrics[camera_name]["audio_enabled"].value = False
+
+        self.publish(f"{camera_name}/audio/state", payload, retain=True)
+
     def _on_recordings_command(self, camera_name: str, payload: str) -> None:
         """Callback for recordings topic."""
         record_settings = self.config.cameras[camera_name].record
@@ -200,12 +224,12 @@ class Dispatcher:
             if not record_settings.enabled:
                 logger.info(f"Turning on recordings for {camera_name}")
                 record_settings.enabled = True
-                self.record_metrics[camera_name]["record_enabled"].value = True
+                self.feature_metrics[camera_name]["record_enabled"].value = True
         elif payload == "OFF":
-            if self.record_metrics[camera_name]["record_enabled"].value:
+            if self.feature_metrics[camera_name]["record_enabled"].value:
                 logger.info(f"Turning off recordings for {camera_name}")
                 record_settings.enabled = False
-                self.record_metrics[camera_name]["record_enabled"].value = False
+                self.feature_metrics[camera_name]["record_enabled"].value = False

         self.publish(f"{camera_name}/recordings/state", payload, retain=True)
@@ -229,7 +253,7 @@ class Dispatcher:
         try:
             if "preset" in payload.lower():
                 command = OnvifCommandEnum.preset
-                param = payload.lower().split("-")[1]
+                param = payload.lower()[payload.index("_") + 1 :]
             else:
                 command = OnvifCommandEnum[payload.lower()]
                 param = ""


@@ -41,7 +41,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         for camera_name, camera in self.config.cameras.items():
             self.publish(
                 f"{camera_name}/recordings/state",
-                "ON" if camera.record.enabled else "OFF",
+                "ON" if camera.record.enabled_in_config else "OFF",
                 retain=True,
             )
             self.publish(
@@ -49,6 +49,11 @@ class MqttClient(Communicator):  # type: ignore[misc]
                 "ON" if camera.snapshots.enabled else "OFF",
                 retain=True,
             )
+            self.publish(
+                f"{camera_name}/audio/state",
+                "ON" if camera.audio.enabled_in_config else "OFF",
+                retain=True,
+            )
             self.publish(
                 f"{camera_name}/detect/state",
                 "ON" if camera.detect.enabled else "OFF",
@@ -144,6 +149,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "recordings",
             "snapshots",
             "detect",
+            "audio",
             "motion",
             "improve_contrast",
             "motion_threshold",


@@ -22,13 +22,13 @@ from frigate.ffmpeg_presets import (
     parse_preset_output_rtmp,
 )
 from frigate.plus import PlusApi
-from frigate.util import (
-    create_mask,
+from frigate.util.builtin import (
     deep_merge,
     escape_special_characters,
     get_ffmpeg_arg_list,
     load_config_with_no_duplicates,
 )
+from frigate.util.image import create_mask

 logger = logging.getLogger(__name__)
@@ -40,6 +40,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
 FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}

 DEFAULT_TRACKED_OBJECTS = ["person"]
+DEFAULT_LISTEN_AUDIO = ["bark", "speech", "yell", "scream"]
 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
@@ -187,7 +188,7 @@ class RecordConfig(FrigateBaseModel):

 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=20,
+        default=30,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
@@ -387,6 +388,19 @@ class ObjectConfig(FrigateBaseModel):
     mask: Union[str, List[str]] = Field(default="", title="Object mask.")

+class AudioConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable audio events.")
+    max_not_heard: int = Field(
+        default=30, title="Seconds of not hearing the type of audio to end the event."
+    )
+    listen: List[str] = Field(
+        default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
+    )
+    enabled_in_config: Optional[bool] = Field(
+        title="Keep track of original state of audio detection."
+    )
+
 class BirdseyeModeEnum(str, Enum):
     objects = "objects"
     motion = "motion"
@@ -463,9 +477,14 @@ class FfmpegConfig(FrigateBaseModel):
         default_factory=FfmpegOutputArgsConfig,
         title="FFmpeg output arguments per role.",
     )
+    retry_interval: float = Field(
+        default=10.0,
+        title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
+    )

 class CameraRoleEnum(str, Enum):
+    audio = "audio"
     record = "record"
     rtmp = "rtmp"
     detect = "detect"
@@ -627,6 +646,9 @@ class CameraConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
     )
+    audio: AudioConfig = Field(
+        default_factory=AudioConfig, title="Audio events configuration."
+    )
     motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
     detect: DetectConfig = Field(
         default_factory=DetectConfig, title="Object detection configuration."
@@ -657,12 +679,16 @@ class CameraConfig(FrigateBaseModel):
         # add roles to the input if there is only one
         if len(config["ffmpeg"]["inputs"]) == 1:
             has_rtmp = "rtmp" in config["ffmpeg"]["inputs"][0].get("roles", [])
+            has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])

             config["ffmpeg"]["inputs"][0]["roles"] = [
                 "record",
                 "detect",
             ]

+            if has_audio:
+                config["ffmpeg"]["inputs"][0]["roles"].append("audio")
+
             if has_rtmp:
                 config["ffmpeg"]["inputs"][0]["roles"].append("rtmp")
@@ -795,6 +821,11 @@ def verify_config_roles(camera_config: CameraConfig) -> None:
             f"Camera {camera_config.name} has rtmp enabled, but rtmp is not assigned to an input."
         )

+    if camera_config.audio.enabled and "audio" not in assigned_roles:
+        raise ValueError(
+            f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
+        )
+
 def verify_valid_live_stream_name(
     frigate_config: FrigateConfig, camera_config: CameraConfig
@@ -907,6 +938,9 @@ class FrigateConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Global object configuration."
     )
+    audio: AudioConfig = Field(
+        default_factory=AudioConfig, title="Global Audio events configuration."
+    )
     motion: Optional[MotionConfig] = Field(
         title="Global motion detection configuration."
     )
@@ -931,6 +965,7 @@ class FrigateConfig(FrigateBaseModel):
         # Global config to propagate down to camera level
         global_config = config.dict(
             include={
+                "audio": ...,
                 "birdseye": ...,
                 "record": ...,
                 "snapshots": ...,
@@ -976,8 +1011,9 @@ class FrigateConfig(FrigateBaseModel):
             camera_config.onvif.password = camera_config.onvif.password.format(
                 **FRIGATE_ENV_VARS
             )

-            # set config recording value
+            # set config pre-value
             camera_config.record.enabled_in_config = camera_config.record.enabled
+            camera_config.audio.enabled_in_config = camera_config.audio.enabled

             # Add default filters
             object_keys = camera_config.objects.track
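
A standalone sketch (plain pydantic rather than Frigate's `FrigateBaseModel`, so an approximation) of how the new `AudioConfig` fields resolve when only `enabled` is set:

```python
from typing import List, Optional

from pydantic import BaseModel, Field

DEFAULT_LISTEN_AUDIO = ["bark", "speech", "yell", "scream"]


class AudioConfig(BaseModel):
    enabled: bool = Field(default=False)
    max_not_heard: int = Field(default=30)
    listen: List[str] = Field(default=DEFAULT_LISTEN_AUDIO)
    enabled_in_config: Optional[bool] = None  # filled in by runtime_config()


cfg = AudioConfig(enabled=True)
print(cfg.listen)         # ['bark', 'speech', 'yell', 'scream']
print(cfg.max_not_heard)  # 30
```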


@@ -8,9 +8,26 @@ EXPORT_DIR = f"{BASE_DIR}/exports"
 BIRDSEYE_PIPE = "/tmp/cache/birdseye"
 CACHE_DIR = "/tmp/cache"
 YAML_EXT = (".yaml", ".yml")
+FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
-BTBN_PATH = "/usr/lib/btbn-ffmpeg"

+# Attributes
+ATTRIBUTE_LABEL_MAP = {
+    "person": ["face", "amazon"],
+    "car": ["ups", "fedex", "amazon", "license_plate"],
+}
+ALL_ATTRIBUTE_LABELS = [
+    item for sublist in ATTRIBUTE_LABEL_MAP.values() for item in sublist
+]
+
+# Audio Consts
+AUDIO_DURATION = 0.975
+AUDIO_FORMAT = "s16le"
+AUDIO_MAX_BIT_RANGE = 32768.0
+AUDIO_SAMPLE_RATE = 16000

 # Regex Consts

@@ -28,5 +45,4 @@ DRIVER_INTEL_iHD = "iHD"
 # Record Values
 MAX_SEGMENT_DURATION = 600
-SECONDS_IN_DAY = 60 * 60 * 24
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times
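
The audio constants pin down the model's input contract: YAMNet consumes 0.975 s of 16 kHz mono s16le audio. A back-of-the-envelope check of the arithmetic, matching `chunk_size` in frigate/events/audio.py below:

```python
# One detection window read from the ffmpeg pipe.
AUDIO_DURATION = 0.975
AUDIO_SAMPLE_RATE = 16000

samples = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE))  # 15600 samples
chunk_bytes = samples * 2  # 2 bytes per s16le sample -> 31200 bytes
print(samples, chunk_bytes)
```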


@@ -11,7 +11,7 @@ from pydantic import BaseModel, Extra, Field
 from pydantic.fields import PrivateAttr

 from frigate.plus import PlusApi
-from frigate.util import load_labels
+from frigate.util.builtin import load_labels

 logger = logging.getLogger(__name__)

frigate/events/audio.py (new file, 256 lines)

@@ -0,0 +1,256 @@
"""Handle creating audio events."""
import datetime
import logging
import multiprocessing as mp
import os
import signal
import threading
from types import FrameType
from typing import Optional
import numpy as np
import requests
from setproctitle import setproctitle
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import (
AUDIO_DURATION,
AUDIO_FORMAT,
AUDIO_MAX_BIT_RANGE,
AUDIO_SAMPLE_RATE,
CACHE_DIR,
FRIGATE_LOCALHOST,
)
from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe
from frigate.object_detection import load_labels
from frigate.types import FeatureMetricsTypes
from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.services import listen
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
try:
from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__)
def get_ffmpeg_command(input_args: list[str], input_path: str, pipe: str) -> list[str]:
return get_ffmpeg_arg_list(
f"ffmpeg {{}} -i {{}} -f {AUDIO_FORMAT} -ar {AUDIO_SAMPLE_RATE} -ac 1 -y {{}}".format(
" ".join(input_args),
input_path,
pipe,
)
)
def listen_to_audio(
config: FrigateConfig,
process_info: dict[str, FeatureMetricsTypes],
) -> None:
stop_event = mp.Event()
audio_threads: list[threading.Thread] = []
def exit_process() -> None:
for thread in audio_threads:
thread.join()
logger.info("Exiting audio detector...")
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
stop_event.set()
exit_process()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
threading.current_thread().name = "process:audio_manager"
setproctitle("frigate.audio_manager")
listen()
for camera in config.cameras.values():
if camera.enabled and camera.audio.enabled_in_config:
audio = AudioEventMaintainer(camera, process_info, stop_event)
audio_threads.append(audio)
audio.start()
class AudioTfl:
def __init__(self, stop_event: mp.Event):
self.stop_event = stop_event
self.labels = load_labels("/audio-labelmap.txt")
self.interpreter = Interpreter(
model_path="/cpu_audio_model.tflite",
num_threads=2,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def _detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
detections = np.zeros((20, 6), np.float32)
res = self.interpreter.get_tensor(self.tensor_output_details[0]["index"])[0]
non_zero_indices = res > 0
class_ids = np.argpartition(-res, 20)[:20]
class_ids = class_ids[np.argsort(-res[class_ids])]
class_ids = class_ids[non_zero_indices[class_ids]]
scores = res[class_ids]
boxes = np.full((scores.shape[0], 4), -1, np.float32)
count = len(scores)
for i in range(count):
if scores[i] < 0.4 or i == 20:
break
detections[i] = [
class_ids[i],
float(scores[i]),
boxes[i][0],
boxes[i][1],
boxes[i][2],
boxes[i][3],
]
return detections
def detect(self, tensor_input, threshold=0.8):
detections = []
if self.stop_event.is_set():
return detections
raw_detections = self._detect_raw(tensor_input)
for d in raw_detections:
if d[1] < threshold:
break
detections.append(
(self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
)
return detections
class AudioEventMaintainer(threading.Thread):
def __init__(
self,
camera: CameraConfig,
feature_metrics: dict[str, FeatureMetricsTypes],
stop_event: mp.Event,
) -> None:
threading.Thread.__init__(self)
self.name = f"{camera.name}_audio_event_processor"
self.config = camera
self.feature_metrics = feature_metrics
self.detections: dict[dict[str, any]] = feature_metrics
self.stop_event = stop_event
self.detector = AudioTfl(stop_event)
self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2))
self.pipe = f"{CACHE_DIR}/{self.config.name}-audio"
self.ffmpeg_cmd = get_ffmpeg_command(
get_ffmpeg_arg_list(self.config.ffmpeg.global_args)
+ parse_preset_input("preset-rtsp-audio-only", 1),
[i.path for i in self.config.ffmpeg.inputs if "audio" in i.roles][0],
self.pipe,
)
self.pipe_file = None
self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.audio")
self.audio_listener = None
def detect_audio(self, audio) -> None:
if not self.feature_metrics[self.config.name]["audio_enabled"].value:
return
waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
model_detections = self.detector.detect(waveform)
for label, score, _ in model_detections:
if label not in self.config.audio.listen:
continue
self.handle_detection(label, score)
self.expire_detections()
def handle_detection(self, label: str, score: float) -> None:
if self.detections.get(label):
self.detections[label][
"last_detection"
] = datetime.datetime.now().timestamp()
else:
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create",
json={"duration": None, "source_type": "audio"},
)
if resp.status_code == 200:
event_id = resp.json()[0]["event_id"]
self.detections[label] = {
"id": event_id,
"label": label,
"last_detection": datetime.datetime.now().timestamp(),
}
def expire_detections(self) -> None:
now = datetime.datetime.now().timestamp()
for detection in self.detections.values():
if not detection:
continue
if (
now - detection.get("last_detection", now)
> self.config.audio.max_not_heard
):
resp = requests.put(
f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
json={
"end_time": detection["last_detection"]
+ self.config.record.events.post_capture
},
)
if resp.status_code == 200:
self.detections[detection["label"]] = None
else:
logger.warning(
f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
)
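# Lifecycle of an audio event, as implemented above: the first detection of a
# listened-for label POSTs /api/events/<camera>/<label>/create with
# {"duration": None, "source_type": "audio"}; each further detection only
# refreshes last_detection; once the label has not been heard for
# audio.max_not_heard seconds, expire_detections() PUTs
# /api/events/<event_id>/end with an end_time padded by
# record.events.post_capture.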
def restart_audio_pipe(self) -> None:
try:
os.mkfifo(self.pipe)
except FileExistsError:
pass
self.audio_listener = start_or_restart_ffmpeg(
self.ffmpeg_cmd, logger, self.logpipe, None, self.audio_listener
)
def read_audio(self) -> None:
if self.pipe_file is None:
self.pipe_file = open(self.pipe, "rb")
try:
audio = np.frombuffer(self.pipe_file.read(self.chunk_size), dtype=np.int16)
self.detect_audio(audio)
except BrokenPipeError:
self.logpipe.dump()
self.restart_audio_pipe()
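# chunk_size is a byte count: int16 samples are 2 bytes each, so one blocking
# read from the FIFO yields exactly self.shape samples, i.e. one scoring
# window for the detector.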
def run(self) -> None:
self.restart_audio_pipe()
while not self.stop_event.is_set():
self.read_audio()
if self.pipe_file is not None:
    self.pipe_file.close()
stop_ffmpeg(self.audio_listener, logger)
self.logpipe.close()

View File

@ -6,15 +6,15 @@ import logging
import os import os
import random import random
import string import string
from multiprocessing.queues import Queue
from typing import Optional from typing import Optional
import cv2 import cv2
from faster_fifo import Queue
from frigate.config import CameraConfig, FrigateConfig from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum from frigate.events.maintainer import EventTypeEnum
from frigate.util import draw_box_with_label from frigate.util.image import draw_box_with_label
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -29,6 +29,7 @@ class ExternalEventProcessor:
self, self,
camera: str, camera: str,
label: str, label: str,
source_type: str,
sub_label: Optional[str], sub_label: Optional[str],
duration: Optional[int], duration: Optional[int],
include_recording: bool, include_recording: bool,
@ -56,22 +57,26 @@ class ExternalEventProcessor:
"label": label, "label": label,
"sub_label": sub_label, "sub_label": sub_label,
"camera": camera, "camera": camera,
"start_time": now, "start_time": now - camera_config.record.events.pre_capture,
"end_time": now + duration if duration is not None else None, "end_time": now
+ duration
+ camera_config.record.events.post_capture
if duration is not None
else None,
"thumbnail": thumbnail, "thumbnail": thumbnail,
"has_clip": camera_config.record.enabled and include_recording, "has_clip": camera_config.record.enabled and include_recording,
"has_snapshot": True, "has_snapshot": True,
"type": source_type,
}, },
) )
) )
return event_id return event_id
def finish_manual_event(self, event_id: str) -> None: def finish_manual_event(self, event_id: str, end_time: float) -> None:
"""Finish external event with indeterminate duration.""" """Finish external event with indeterminate duration."""
now = datetime.datetime.now().timestamp()
self.queue.put( self.queue.put(
(EventTypeEnum.api, "end", None, {"id": event_id, "end_time": now}) (EventTypeEnum.api, "end", None, {"id": event_id, "end_time": end_time})
) )
def _write_images( def _write_images(

View File

@ -3,21 +3,21 @@ import logging
import queue import queue
import threading import threading
from enum import Enum from enum import Enum
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from typing import Dict from typing import Dict
from faster_fifo import Queue
from frigate.config import EventsConfig, FrigateConfig from frigate.config import EventsConfig, FrigateConfig
from frigate.models import Event from frigate.models import Event
from frigate.types import CameraMetricsTypes from frigate.types import CameraMetricsTypes
from frigate.util import to_relative_box from frigate.util.builtin import to_relative_box
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class EventTypeEnum(str, Enum): class EventTypeEnum(str, Enum):
api = "api" api = "api"
# audio = "audio"
tracked_object = "tracked_object" tracked_object = "tracked_object"
@ -72,8 +72,11 @@ class EventProcessor(threading.Thread):
except queue.Empty: except queue.Empty:
continue continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}") logger.debug(
f"Event received: {source_type} {event_type} {camera} {event_data['id']}"
)
if source_type == EventTypeEnum.tracked_object:
self.timeline_queue.put( self.timeline_queue.put(
( (
camera, camera,
@ -84,7 +87,6 @@ class EventProcessor(threading.Thread):
) )
) )
if source_type == EventTypeEnum.tracked_object:
if event_type == "start": if event_type == "start":
self.events_in_process[event_data["id"]] = event_data self.events_in_process[event_data["id"]] = event_data
continue continue
@ -191,6 +193,7 @@ class EventProcessor(threading.Thread):
"score": score, "score": score,
"top_score": event_data["top_score"], "top_score": event_data["top_score"],
"attributes": attributes, "attributes": attributes,
"type": "object",
}, },
} }
@ -214,8 +217,8 @@ class EventProcessor(threading.Thread):
del self.events_in_process[event_data["id"]] del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera)) self.event_processed_queue.put((event_data["id"], camera))
def handle_external_detection(self, type: str, event_data: Event): def handle_external_detection(self, event_type: str, event_data: Event) -> None:
if type == "new": if event_type == "new":
event = { event = {
Event.id: event_data["id"], Event.id: event_data["id"],
Event.label: event_data["label"], Event.label: event_data["label"],
@ -227,22 +230,16 @@ class EventProcessor(threading.Thread):
Event.has_clip: event_data["has_clip"], Event.has_clip: event_data["has_clip"],
Event.has_snapshot: event_data["has_snapshot"], Event.has_snapshot: event_data["has_snapshot"],
Event.zones: [], Event.zones: [],
Event.data: {}, Event.data: {"type": event_data["type"]},
} }
elif type == "end": Event.insert(event).execute()
elif event_type == "end":
event = { event = {
Event.id: event_data["id"], Event.id: event_data["id"],
Event.end_time: event_data["end_time"], Event.end_time: event_data["end_time"],
} }
try: try:
( Event.update(event).where(Event.id == event_data["id"]).execute()
Event.insert(event)
.on_conflict(
conflict_target=[Event.id],
update=event,
)
.execute()
)
except Exception: except Exception:
logger.warning(f"Failed to update manual event: {event_data['id']}") logger.warning(f"Failed to update manual event: {event_data['id']}")

View File

@ -5,8 +5,7 @@ import os
from enum import Enum from enum import Enum
from typing import Any from typing import Any
from frigate.const import BTBN_PATH from frigate.util.services import vainfo_hwaccel
from frigate.util import vainfo_hwaccel
from frigate.version import VERSION from frigate.version import VERSION
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -43,7 +42,11 @@ class LibvaGpuSelector:
return "" return ""
TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout" TIMEOUT_PARAM = (
"-timeout"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
else "-stimeout"
)
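# Context for the version probe: libavformat major version 59 ships with
# FFmpeg 5.0, the release that renamed the RTSP demuxer option -stimeout to
# -timeout, so older builds still need -stimeout.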
_gpu_selector = LibvaGpuSelector() _gpu_selector = LibvaGpuSelector()
_user_agent_args = [ _user_agent_args = [
@ -107,14 +110,14 @@ PRESETS_HW_ACCEL_DECODE = {
} }
PRESETS_HW_ACCEL_SCALE = { PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-32-h264": "-r {0} -s {1}x{2}", "preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h264": "-r {0} -s {1}x{2}", "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p", "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p", "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"default": "-r {0} -s {1}x{2}", "default": "-r {0} -vf fps={0},scale={1}:{2}",
} }
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = { PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
@ -282,6 +285,13 @@ PRESETS_INPUT = {
"-use_wallclock_as_timestamps", "-use_wallclock_as_timestamps",
"1", "1",
], ],
"preset-rtsp-audio-only": [
"-rtsp_transport",
"tcp",
TIMEOUT_PARAM,
"5000000",
"-vn",
],
"preset-rtsp-restream": _user_agent_args "preset-rtsp-restream": _user_agent_args
+ [ + [
"-rtsp_transport", "-rtsp_transport",

View File

@ -38,14 +38,9 @@ from frigate.ptz import OnvifController
from frigate.record.export import PlaybackFactorEnum, RecordingExporter from frigate.record.export import PlaybackFactorEnum, RecordingExporter
from frigate.stats import stats_snapshot from frigate.stats import stats_snapshot
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
from frigate.util import ( from frigate.util.builtin import clean_camera_user_pass, get_tz_modifiers
clean_camera_user_pass, from frigate.util.image import update_yaml_from_url
ffprobe_stream, from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel
get_tz_modifiers,
restart_frigate,
update_yaml_from_url,
vainfo_hwaccel,
)
from frigate.version import VERSION from frigate.version import VERSION
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -411,6 +406,24 @@ def set_sub_label(id):
) )
@bp.route("/labels")
def get_labels():
camera = request.args.get("camera", type=str, default="")
try:
if camera:
events = Event.select(Event.label).where(Event.camera == camera).distinct()
else:
events = Event.select(Event.label).distinct()
except Exception as e:
return (
    jsonify({"success": False, "message": f"Failed to get labels: {e}"}),
    404,
)
labels = sorted([e.label for e in events])
return jsonify(labels)
@bp.route("/sub_labels") @bp.route("/sub_labels")
def get_sub_labels(): def get_sub_labels():
split_joined = request.args.get("split_joined", type=int) split_joined = request.args.get("split_joined", type=int)
@ -867,6 +880,7 @@ def create_event(camera_name, label):
event_id = current_app.external_processor.create_manual_event( event_id = current_app.external_processor.create_manual_event(
camera_name, camera_name,
label, label,
json.get("source_type", "api"),
json.get("sub_label", None), json.get("sub_label", None),
json.get("duration", 30), json.get("duration", 30),
json.get("include_recording", True), json.get("include_recording", True),
@ -891,8 +905,11 @@ def create_event(camera_name, label):
@bp.route("/events/<event_id>/end", methods=["PUT"]) @bp.route("/events/<event_id>/end", methods=["PUT"])
def end_event(event_id): def end_event(event_id):
json: dict[str, any] = request.get_json(silent=True) or {}
try: try:
current_app.external_processor.finish_manual_event(event_id) end_time = json.get("end_time", datetime.now().timestamp())
current_app.external_processor.finish_manual_event(event_id, end_time)
except Exception: except Exception:
return jsonify( return jsonify(
{"success": False, "message": f"{event_id} must be set and valid."}, 404 {"success": False, "message": f"{event_id} must be set and valid."}, 404
@ -1143,6 +1160,15 @@ def latest_frame(camera_name):
height = int(request.args.get("h", str(frame.shape[0]))) height = int(request.args.get("h", str(frame.shape[0])))
width = int(height * frame.shape[1] / frame.shape[0]) width = int(height * frame.shape[1] / frame.shape[0])
if frame is None:
return "Unable to get valid frame from {}".format(camera_name), 500
if height < 1 or width < 1:
return (
"Invalid height / width requested :: {} / {}".format(height, width),
400,
)
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode( ret, jpg = cv2.imencode(

View File

@ -7,13 +7,13 @@ import signal
import threading import threading
from collections import deque from collections import deque
from logging import handlers from logging import handlers
from multiprocessing.queues import Queue
from types import FrameType from types import FrameType
from typing import Deque, Optional from typing import Deque, Optional
from faster_fifo import Queue
from setproctitle import setproctitle from setproctitle import setproctitle
from frigate.util import clean_camera_user_pass from frigate.util.builtin import clean_camera_user_pass
def listener_configurer() -> None: def listener_configurer() -> None:

View File

@ -1,6 +1,7 @@
import cv2 import cv2
import imutils import imutils
import numpy as np import numpy as np
from scipy.ndimage import gaussian_filter
from frigate.config import MotionConfig from frigate.config import MotionConfig
from frigate.motion import MotionDetector from frigate.motion import MotionDetector
@ -15,9 +16,10 @@ class ImprovedMotionDetector(MotionDetector):
improve_contrast, improve_contrast,
threshold, threshold,
contour_area, contour_area,
clipLimit=2.0,
tileGridSize=(2, 2),
name="improved", name="improved",
blur_radius=1,
interpolation=cv2.INTER_NEAREST,
contrast_frame_history=50,
): ):
self.name = name self.name = name
self.config = config self.config = config
@ -28,13 +30,12 @@ class ImprovedMotionDetector(MotionDetector):
config.frame_height * frame_shape[1] // frame_shape[0], config.frame_height * frame_shape[1] // frame_shape[0],
) )
self.avg_frame = np.zeros(self.motion_frame_size, np.float32) self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
self.motion_frame_count = 0 self.motion_frame_count = 0
self.frame_counter = 0 self.frame_counter = 0
resized_mask = cv2.resize( resized_mask = cv2.resize(
config.mask, config.mask,
dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
interpolation=cv2.INTER_LINEAR, interpolation=cv2.INTER_AREA,
) )
self.mask = np.where(resized_mask == [0]) self.mask = np.where(resized_mask == [0])
self.save_images = False self.save_images = False
@ -42,7 +43,11 @@ class ImprovedMotionDetector(MotionDetector):
self.improve_contrast = improve_contrast self.improve_contrast = improve_contrast
self.threshold = threshold self.threshold = threshold
self.contour_area = contour_area self.contour_area = contour_area
self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize) self.blur_radius = blur_radius
self.interpolation = interpolation
self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
self.contrast_values[:, 1:2] = 255
self.contrast_values_index = 0
def detect(self, frame): def detect(self, frame):
motion_boxes = [] motion_boxes = []
@ -53,27 +58,44 @@ class ImprovedMotionDetector(MotionDetector):
resized_frame = cv2.resize( resized_frame = cv2.resize(
gray, gray,
dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
interpolation=cv2.INTER_LINEAR, interpolation=self.interpolation,
) )
if self.save_images: if self.save_images:
resized_saved = resized_frame.copy() resized_saved = resized_frame.copy()
resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
if self.save_images:
blurred_saved = resized_frame.copy()
# Improve contrast # Improve contrast
if self.improve_contrast.value: if self.improve_contrast.value:
resized_frame = self.clahe.apply(resized_frame) # TODO tracking moving average of min/max to avoid sudden contrast changes
minval = np.percentile(resized_frame, 4).astype(np.uint8)
maxval = np.percentile(resized_frame, 96).astype(np.uint8)
# skip contrast calcs if the image is a single color
if minval < maxval:
# keep track of the last 50 contrast values
self.contrast_values[self.contrast_values_index] = [minval, maxval]
self.contrast_values_index += 1
if self.contrast_values_index == len(self.contrast_values):
self.contrast_values_index = 0
avg_min, avg_max = np.mean(self.contrast_values, axis=0)
resized_frame = np.clip(resized_frame, avg_min, avg_max)
resized_frame = (
((resized_frame - avg_min) / (avg_max - avg_min)) * 255
).astype(np.uint8)
if self.save_images: if self.save_images:
contrasted_saved = resized_frame.copy() contrasted_saved = resized_frame.copy()
# mask frame # mask frame
# this has to come after contrast improvement
resized_frame[self.mask] = [255] resized_frame[self.mask] = [255]
resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)
if self.save_images:
blurred_saved = resized_frame.copy()
if self.save_images or self.calibrating: if self.save_images or self.calibrating:
self.frame_counter += 1 self.frame_counter += 1
# compare to average # compare to average
@ -134,8 +156,8 @@ class ImprovedMotionDetector(MotionDetector):
) )
frames = [ frames = [
cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR), cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR), cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR), cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
thresh_dilated, thresh_dilated,

View File

@ -7,12 +7,15 @@ import signal
import threading import threading
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
import faster_fifo as ff
import numpy as np import numpy as np
from setproctitle import setproctitle from setproctitle import setproctitle
from frigate.detectors import create_detector from frigate.detectors import create_detector
from frigate.detectors.detector_config import InputTensorEnum from frigate.detectors.detector_config import InputTensorEnum
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.services import listen
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -72,7 +75,7 @@ class LocalObjectDetector(ObjectDetector):
def run_detector( def run_detector(
name: str, name: str,
detection_queue: mp.Queue, detection_queue: ff.Queue,
out_events: dict[str, mp.Event], out_events: dict[str, mp.Event],
avg_speed, avg_speed,
start, start,

View File

@ -22,7 +22,7 @@ from frigate.config import (
) )
from frigate.const import CLIPS_DIR from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum from frigate.events.maintainer import EventTypeEnum
from frigate.util import ( from frigate.util.image import (
SharedMemoryFrameManager, SharedMemoryFrameManager,
area, area,
calculate_region, calculate_region,
@ -188,15 +188,14 @@ class TrackedObject:
zone_score = self.zone_presence.get(name, 0) zone_score = self.zone_presence.get(name, 0)
# check if the object is in the zone # check if the object is in the zone
if cv2.pointPolygonTest(contour, bottom_center, False) >= 0: if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
# if the object passed the filters once, dont apply again
if name in self.current_zones or not zone_filtered(self, zone.filters):
self.zone_presence[name] = zone_score + 1 self.zone_presence[name] = zone_score + 1
# an object is only considered present in a zone if it has a zone inertia of 3+ # an object is only considered present in a zone if it has a zone inertia of 3+
if zone_score >= zone.inertia: if zone_score >= zone.inertia:
# if the object passed the filters once, dont apply again
if name in self.current_zones or not zone_filtered(
self, zone.filters
):
current_zones.append(name) current_zones.append(name)
if name not in self.entered_zones: if name not in self.entered_zones:
self.entered_zones.append(name) self.entered_zones.append(name)
else: else:

View File

@ -24,11 +24,70 @@ from ws4py.websocket import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop from frigate.util.image import (
SharedMemoryFrameManager,
copy_yuv_to_position,
get_yuv_crop,
)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def get_standard_aspect_ratio(width, height) -> tuple[int, int]:
"""Ensure that only standard aspect ratios are used."""
known_aspects = [
(16, 9),
(9, 16),
(32, 9),
(12, 9),
(9, 12),
] # aspects are scaled to have common relative size
known_aspects_ratios = list(
map(lambda aspect: aspect[0] / aspect[1], known_aspects)
)
closest = min(
known_aspects_ratios,
key=lambda x: abs(x - (width / height)),
)
return known_aspects[known_aspects_ratios.index(closest)]
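# For example, a 1920x1080 stream reduces to 16/9 and maps to (16, 9); a 4:3
# camera (ratio 1.333) maps to (12, 9), keeping it at a comparable relative
# size next to 16:9 cameras in the layout.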
class Canvas:
def __init__(self, canvas_width: int, canvas_height: int) -> None:
gcd = math.gcd(canvas_width, canvas_height)
self.aspect = get_standard_aspect_ratio(
(canvas_width / gcd), (canvas_height / gcd)
)
self.width = canvas_width
self.height = (self.width * self.aspect[1]) / self.aspect[0]
self.coefficient_cache: dict[int, int] = {}
self.aspect_cache: dict[str, tuple[int, int]] = {}
def get_aspect(self, coefficient: int) -> tuple[int, int]:
return (self.aspect[0] * coefficient, self.aspect[1] * coefficient)
def get_coefficient(self, camera_count: int) -> int:
return self.coefficient_cache.get(camera_count, 2)
def set_coefficient(self, camera_count: int, coefficient: int) -> None:
self.coefficient_cache[camera_count] = coefficient
def get_camera_aspect(
self, cam_name: str, camera_width: int, camera_height: int
) -> tuple[int, int]:
cached = self.aspect_cache.get(cam_name)
if cached:
return cached
gcd = math.gcd(camera_width, camera_height)
camera_aspect = get_standard_aspect_ratio(
camera_width / gcd, camera_height / gcd
)
self.aspect_cache[cam_name] = camera_aspect
return camera_aspect
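# A minimal sketch of Canvas in isolation, assuming a 1280x720 birdseye
# canvas (the dimensions are illustrative only):
#
#   canvas = Canvas(1280, 720)
#   canvas.aspect                                  # -> (16, 9)
#   canvas.get_aspect(2)                           # -> (32, 18)
#   canvas.get_camera_aspect("cam", 1920, 1080)    # -> (16, 9), cached by name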
class FFMpegConverter: class FFMpegConverter:
def __init__( def __init__(
self, self,
@ -156,7 +215,12 @@ class BroadcastThread(threading.Thread):
class BirdsEyeFrameManager: class BirdsEyeFrameManager:
def __init__(self, config: FrigateConfig, frame_manager: SharedMemoryFrameManager): def __init__(
self,
config: FrigateConfig,
frame_manager: SharedMemoryFrameManager,
stop_event: mp.Event,
):
self.config = config self.config = config
self.mode = config.birdseye.mode self.mode = config.birdseye.mode
self.frame_manager = frame_manager self.frame_manager = frame_manager
@ -165,6 +229,8 @@ class BirdsEyeFrameManager:
self.frame_shape = (height, width) self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width) self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
self.canvas = Canvas(width, height)
self.stop_event = stop_event
# initialize the frame as black and with the Frigate logo # initialize the frame as black and with the Frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8) self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
@ -270,15 +336,152 @@ class BirdsEyeFrameManager:
def update_frame(self): def update_frame(self):
"""Update to a new frame for birdseye.""" """Update to a new frame for birdseye."""
def calculate_layout( # determine how many cameras are tracking objects within the last 30 seconds
canvas, cameras_to_add: list[str], coefficient active_cameras = set(
) -> tuple[any]: [
cam
for cam, cam_data in self.cameras.items()
if cam_data["last_active_frame"] > 0
and cam_data["current_frame"] - cam_data["last_active_frame"] < 30
]
)
# if there are no active cameras
if len(active_cameras) == 0:
# if the layout is already cleared
if len(self.camera_layout) == 0:
return False
# if the layout needs to be cleared
else:
self.camera_layout = []
self.active_cameras = set()
self.clear_frame()
return True
# check if we need to reset the layout because there is a different number of cameras
reset_layout = len(self.active_cameras) - len(active_cameras) != 0
# reset the layout if it needs to be different
if reset_layout:
logger.debug("Added new cameras, resetting layout...")
self.clear_frame()
self.active_cameras = active_cameras
# this also converts added_cameras from a set to a list since we need
# to pop elements in order
active_cameras_to_add = sorted(
active_cameras,
# sort cameras by order and by name if the order is the same
key=lambda active_camera: (
self.config.cameras[active_camera].birdseye.order,
active_camera,
),
)
if len(active_cameras) == 1:
# show single camera as fullscreen
camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
coefficient = (
1
if scaled_width <= self.canvas.width
else self.canvas.width / scaled_width
)
self.camera_layout = [
[
(
camera,
(
0,
0,
int(scaled_width * coefficient),
int(self.canvas.height * coefficient),
),
)
]
]
else:
# calculate optimal layout
coefficient = self.canvas.get_coefficient(len(active_cameras))
calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
while calculating:
if self.stop_event.is_set():
return
layout_candidate = self.calculate_layout(
active_cameras_to_add,
coefficient,
)
if not layout_candidate:
if coefficient < 10:
coefficient += 1
continue
else:
logger.error("Error finding appropriate birdseye layout")
return
calculating = False
self.canvas.set_coefficient(len(active_cameras), coefficient)
self.camera_layout = layout_candidate
for row in self.camera_layout:
for position in row:
self.copy_to_position(
position[1], position[0], self.cameras[position[0]]["current_frame"]
)
return True
def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
"""Calculate the optimal layout for 2+ cameras.""" """Calculate the optimal layout for 2+ cameras."""
def map_layout(row_height: int):
"""Map the calculated layout."""
candidate_layout = []
starting_x = 0
x = 0
max_width = 0
y = 0
for row in camera_layout:
final_row = []
max_width = max(max_width, x)
x = starting_x
for cameras in row:
camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
camera_aspect = cameras[1]
if camera_dims[1] > camera_dims[0]:
scaled_height = int(row_height * 2)
scaled_width = int(scaled_height * camera_aspect)
starting_x = scaled_width
else:
scaled_height = row_height
scaled_width = int(scaled_height * camera_aspect)
# layout is too large
if (
x + scaled_width > self.canvas.width
or y + scaled_height > self.canvas.height
):
return 0, 0, None
final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
x += scaled_width
y += row_height
candidate_layout.append(final_row)
return max_width, y, candidate_layout
canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient)
camera_layout: list[list[any]] = [] camera_layout: list[list[any]] = []
camera_layout.append([]) camera_layout.append([])
canvas_gcd = math.gcd(canvas[0], canvas[1])
canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
starting_x = 0 starting_x = 0
x = starting_x x = starting_x
y = 0 y = 0
@ -286,18 +489,9 @@ class BirdsEyeFrameManager:
max_y = 0 max_y = 0
for camera in cameras_to_add: for camera in cameras_to_add:
camera_dims = self.cameras[camera]["dimensions"].copy() camera_dims = self.cameras[camera]["dimensions"].copy()
camera_gcd = math.gcd(camera_dims[0], camera_dims[1]) camera_aspect_x, camera_aspect_y = self.canvas.get_camera_aspect(
camera_aspect_x = camera_dims[0] / camera_gcd camera, camera_dims[0], camera_dims[1]
camera_aspect_y = camera_dims[1] / camera_gcd )
if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
# account for slightly off 16:9 cameras
camera_aspect_x = 16
camera_aspect_y = 9
elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
# make 4:3 cameras the same relative size as 16:9
camera_aspect_x = 12
camera_aspect_y = 9
if camera_dims[1] > camera_dims[0]: if camera_dims[1] > camera_dims[0]:
portrait = True portrait = True
@ -309,10 +503,7 @@ class BirdsEyeFrameManager:
camera_layout[y_i].append( camera_layout[y_i].append(
( (
camera, camera,
( camera_aspect_x / camera_aspect_y,
camera_aspect_x,
camera_aspect_y,
),
) )
) )
@ -338,7 +529,7 @@ class BirdsEyeFrameManager:
camera_layout[y_i].append( camera_layout[y_i].append(
( (
camera, camera,
(camera_aspect_x, camera_aspect_y), camera_aspect_x / camera_aspect_y,
) )
) )
x += camera_aspect_x x += camera_aspect_x
@ -346,143 +537,24 @@ class BirdsEyeFrameManager:
if y + max_y > canvas_aspect_y: if y + max_y > canvas_aspect_y:
return None return None
row_height = int(canvas_height / coefficient) row_height = int(self.canvas.height / coefficient)
total_width, total_height, standard_candidate_layout = map_layout(row_height)
final_camera_layout = [] # layout can't be optimized more
starting_x = 0 if total_width / self.canvas.width >= 0.99:
y = 0 return standard_candidate_layout
for row in camera_layout: scale_up_percent = min(
final_row = [] 1 - (total_width / self.canvas.width),
x = starting_x 1 - (total_height / self.canvas.height),
for cameras in row:
camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
if camera_dims[1] > camera_dims[0]:
scaled_height = int(row_height * coefficient)
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
) )
starting_x = scaled_width row_height = int(row_height * (1 + round(scale_up_percent, 1)))
_, _, scaled_layout = map_layout(row_height)
if scaled_layout:
return scaled_layout
else: else:
scaled_height = row_height return standard_candidate_layout
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)
if (
x + scaled_width > canvas_width
or y + scaled_height > canvas_height
):
return None
final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
x += scaled_width
y += row_height
final_camera_layout.append(final_row)
return final_camera_layout
# determine how many cameras are tracking objects within the last 30 seconds
active_cameras = set(
[
cam
for cam, cam_data in self.cameras.items()
if cam_data["last_active_frame"] > 0
and cam_data["current_frame"] - cam_data["last_active_frame"] < 30
]
)
# if there are no active cameras
if len(active_cameras) == 0:
# if the layout is already cleared
if len(self.camera_layout) == 0:
return False
# if the layout needs to be cleared
else:
self.camera_layout = []
self.active_cameras = set()
self.clear_frame()
return True
# check if we need to reset the layout because there are new cameras to add
reset_layout = (
True if len(active_cameras.difference(self.active_cameras)) > 0 else False
)
# reset the layout if it needs to be different
if reset_layout:
logger.debug("Added new cameras, resetting layout...")
self.clear_frame()
self.active_cameras = active_cameras
# this also converts added_cameras from a set to a list since we need
# to pop elements in order
active_cameras_to_add = sorted(
active_cameras,
# sort cameras by order and by name if the order is the same
key=lambda active_camera: (
self.config.cameras[active_camera].birdseye.order,
active_camera,
),
)
canvas_width = self.config.birdseye.width
canvas_height = self.config.birdseye.height
if len(active_cameras) == 1:
# show single camera as fullscreen
camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(canvas_height * camera_dims[0] / camera_dims[1])
coefficient = (
1 if scaled_width <= canvas_width else canvas_width / scaled_width
)
self.camera_layout = [
[
(
camera,
(
0,
0,
int(scaled_width * coefficient),
int(canvas_height * coefficient),
),
)
]
]
else:
# calculate optimal layout
coefficient = 2
calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
while calculating:
layout_candidate = calculate_layout(
(canvas_width, canvas_height),
active_cameras_to_add,
coefficient,
)
if not layout_candidate:
if coefficient < 10:
coefficient += 1
continue
else:
logger.error("Error finding appropriate birdseye layout")
return
calculating = False
self.camera_layout = layout_candidate
for row in self.camera_layout:
for position in row:
self.copy_to_position(
position[1], position[0], self.cameras[position[0]]["current_frame"]
)
return True
def update(self, camera, object_count, motion_count, frame_time, frame) -> bool: def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
# don't process if birdseye is disabled for this camera # don't process if birdseye is disabled for this camera
@ -580,7 +652,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
for t in broadcasters.values(): for t in broadcasters.values():
t.start() t.start()
birdseye_manager = BirdsEyeFrameManager(config, frame_manager) birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
if config.birdseye.restream: if config.birdseye.restream:
birdseye_buffer = frame_manager.create( birdseye_buffer = frame_manager.create(

View File

@ -8,10 +8,10 @@ import threading
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
from peewee import DatabaseError, DoesNotExist, chunked from peewee import DatabaseError, chunked
from frigate.config import FrigateConfig, RetainModeEnum from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import RECORD_DIR, SECONDS_IN_DAY from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings, RecordingsToDelete, Timeline from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
from frigate.record.util import remove_empty_directories from frigate.record.util import remove_empty_directories
@ -28,7 +28,7 @@ class RecordingCleanup(threading.Thread):
self.stop_event = stop_event self.stop_event = stop_event
def clean_tmp_clips(self) -> None: def clean_tmp_clips(self) -> None:
# delete any clips more than 5 minutes old """delete any clips in the cache that are more than 5 minutes old."""
for p in Path("/tmp/cache").rglob("clip_*.mp4"): for p in Path("/tmp/cache").rglob("clip_*.mp4"):
logger.debug(f"Checking tmp clip {p}.") logger.debug(f"Checking tmp clip {p}.")
if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1): if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
@ -40,8 +40,8 @@ class RecordingCleanup(threading.Thread):
p.unlink(missing_ok=True) p.unlink(missing_ok=True)
def expire_recordings(self) -> None: def expire_recordings(self) -> None:
logger.debug("Start expire recordings (new).") """Delete recordings based on retention config."""
logger.debug("Start expire recordings.")
logger.debug("Start deleted cameras.") logger.debug("Start deleted cameras.")
# Handle deleted cameras # Handle deleted cameras
expire_days = self.config.record.retain.days expire_days = self.config.record.retain.days
@ -161,59 +161,10 @@ class RecordingCleanup(threading.Thread):
logger.debug(f"End camera: {camera}.") logger.debug(f"End camera: {camera}.")
logger.debug("End all cameras.") logger.debug("End all cameras.")
logger.debug("End expire recordings (new).") logger.debug("End expire recordings.")
def expire_files(self) -> None:
logger.debug("Start expire files (legacy).")
default_expire = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * self.config.record.retain.days
)
delete_before = {}
for name, camera in self.config.cameras.items():
delete_before[name] = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * camera.record.retain.days
)
# find all the recordings older than the oldest recording in the db
try:
oldest_recording = (
Recordings.select().order_by(Recordings.start_time).limit(1).get()
)
p = Path(oldest_recording.path)
oldest_timestamp = p.stat().st_mtime - 1
except DoesNotExist:
oldest_timestamp = datetime.datetime.now().timestamp()
except FileNotFoundError:
logger.warning(f"Unable to find file from recordings database: {p}")
Recordings.delete().where(Recordings.id == oldest_recording.id).execute()
return
logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
files_to_check = []
for root, _, files in os.walk(RECORD_DIR):
for file in files:
file_path = os.path.join(root, file)
if os.path.getmtime(file_path) < oldest_timestamp:
files_to_check.append(file_path)
for f in files_to_check:
p = Path(f)
try:
if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
p.unlink(missing_ok=True)
except FileNotFoundError:
logger.warning(f"Attempted to expire missing file: {f}")
logger.debug("End expire files (legacy).")
def sync_recordings(self) -> None: def sync_recordings(self) -> None:
"""Check the db for stale recordings entries that don't exist in the filesystem."""
logger.debug("Start sync recordings.") logger.debug("Start sync recordings.")
# get all recordings in the db # get all recordings in the db
@ -283,5 +234,4 @@ class RecordingCleanup(threading.Thread):
if counter == 0: if counter == 0:
self.expire_recordings() self.expire_recordings()
self.expire_files()
remove_empty_directories(RECORD_DIR) remove_empty_directories(RECORD_DIR)

View File

@ -3,7 +3,6 @@
import asyncio import asyncio
import datetime import datetime
import logging import logging
import multiprocessing as mp
import os import os
import queue import queue
import random import random
@ -15,13 +14,15 @@ from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
from typing import Any, Tuple from typing import Any, Tuple
import faster_fifo as ff
import psutil import psutil
from frigate.config import FrigateConfig, RetainModeEnum from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
from frigate.types import RecordMetricsTypes from frigate.types import FeatureMetricsTypes
from frigate.util import area, get_video_properties from frigate.util.image import area
from frigate.util.services import get_video_properties
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -30,8 +31,8 @@ class RecordingMaintainer(threading.Thread):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
recordings_info_queue: mp.Queue, recordings_info_queue: ff.Queue,
process_info: dict[str, RecordMetricsTypes], process_info: dict[str, FeatureMetricsTypes],
stop_event: MpEvent, stop_event: MpEvent,
): ):
threading.Thread.__init__(self) threading.Thread.__init__(self)

View File

@ -7,6 +7,7 @@ import threading
from types import FrameType from types import FrameType
from typing import Optional from typing import Optional
import faster_fifo as ff
from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq import SqliteQueueDatabase
from setproctitle import setproctitle from setproctitle import setproctitle
@ -14,16 +15,16 @@ from frigate.config import FrigateConfig
from frigate.models import Event, Recordings, RecordingsToDelete, Timeline from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
from frigate.record.cleanup import RecordingCleanup from frigate.record.cleanup import RecordingCleanup
from frigate.record.maintainer import RecordingMaintainer from frigate.record.maintainer import RecordingMaintainer
from frigate.types import RecordMetricsTypes from frigate.types import FeatureMetricsTypes
from frigate.util import listen from frigate.util.services import listen
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def manage_recordings( def manage_recordings(
config: FrigateConfig, config: FrigateConfig,
recordings_info_queue: mp.Queue, recordings_info_queue: ff.Queue,
process_info: dict[str, RecordMetricsTypes], process_info: dict[str, FeatureMetricsTypes],
) -> None: ) -> None:
stop_event = mp.Event() stop_event = mp.Event()

View File

@ -17,7 +17,7 @@ from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR
from frigate.object_detection import ObjectDetectProcess from frigate.object_detection import ObjectDetectProcess
from frigate.types import CameraMetricsTypes, StatsTrackingTypes from frigate.types import CameraMetricsTypes, StatsTrackingTypes
from frigate.util import ( from frigate.util.services import (
get_amd_gpu_stats, get_amd_gpu_stats,
get_bandwidth_stats, get_bandwidth_stats,
get_cpu_stats, get_cpu_stats,
@ -262,8 +262,12 @@ def stats_snapshot(
for name, detector in stats_tracking["detectors"].items(): for name, detector in stats_tracking["detectors"].items():
pid = detector.detect_process.pid if detector.detect_process else None pid = detector.detect_process.pid if detector.detect_process else None
stats["detectors"][name] = { stats["detectors"][name] = {
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2), "inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined]
"detection_start": detector.detection_start.value, # issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"detection_start": detector.detection_start.value, # type: ignore[attr-defined]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"pid": pid, "pid": pid,
} }
stats["detection_fps"] = round(total_detection_fps, 2) stats["detection_fps"] = round(total_detection_fps, 2)

View File

@ -2,7 +2,7 @@
import unittest import unittest
from frigate.util import clean_camera_user_pass, escape_special_characters from frigate.util.builtin import clean_camera_user_pass, escape_special_characters
class TestUserPassCleanup(unittest.TestCase): class TestUserPassCleanup(unittest.TestCase):

View File

@ -9,7 +9,7 @@ from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
from frigate.detectors import DetectorTypeEnum from frigate.detectors import DetectorTypeEnum
from frigate.plus import PlusApi from frigate.plus import PlusApi
from frigate.util import deep_merge, load_config_with_no_duplicates from frigate.util.builtin import deep_merge, load_config_with_no_duplicates
class TestConfig(unittest.TestCase): class TestConfig(unittest.TestCase):

View File

@ -3,7 +3,7 @@ from unittest import TestCase, main
import cv2 import cv2
import numpy as np import numpy as np
from frigate.util import copy_yuv_to_position, get_yuv_crop from frigate.util.image import copy_yuv_to_position, get_yuv_crop
class TestCopyYuvToPosition(TestCase): class TestCopyYuvToPosition(TestCase):

View File

@ -1,7 +1,7 @@
import unittest import unittest
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
from frigate.util import get_amd_gpu_stats, get_intel_gpu_stats from frigate.util.services import get_amd_gpu_stats, get_intel_gpu_stats
class TestGpuStats(unittest.TestCase): class TestGpuStats(unittest.TestCase):

View File

@ -5,7 +5,7 @@ import numpy as np
from norfair.drawing.color import Palette from norfair.drawing.color import Palette
from norfair.drawing.drawer import Drawer from norfair.drawing.drawer import Drawer
from frigate.util import intersection from frigate.util.image import intersection
from frigate.video import ( from frigate.video import (
get_cluster_boundary, get_cluster_boundary,
get_cluster_candidates, get_cluster_candidates,

View File

@ -3,7 +3,7 @@ from unittest import TestCase, main
import cv2 import cv2
import numpy as np import numpy as np
from frigate.util import yuv_region_2_rgb from frigate.util.image import yuv_region_2_rgb
class TestYuvRegion2RGB(TestCase): class TestYuvRegion2RGB(TestCase):

View File

@ -3,13 +3,14 @@
import logging import logging
import queue import queue
import threading import threading
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from faster_fifo import Queue
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.events.maintainer import EventTypeEnum from frigate.events.maintainer import EventTypeEnum
from frigate.models import Timeline from frigate.models import Timeline
from frigate.util import to_relative_box from frigate.util.builtin import to_relative_box
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -7,7 +7,7 @@ from norfair.drawing.drawer import Drawer
from frigate.config import DetectConfig from frigate.config import DetectConfig
from frigate.track import ObjectTracker from frigate.track import ObjectTracker
from frigate.util import intersection_over_union from frigate.util.image import intersection_over_union
# Normalizes distance from estimate relative to object size # Normalizes distance from estimate relative to object size

View File

@ -1,8 +1,9 @@
from multiprocessing.context import Process from multiprocessing.context import Process
from multiprocessing.queues import Queue
from multiprocessing.sharedctypes import Synchronized from multiprocessing.sharedctypes import Synchronized
from typing import Optional, TypedDict from typing import Optional, TypedDict
from faster_fifo import Queue
from frigate.object_detection import ObjectDetectProcess from frigate.object_detection import ObjectDetectProcess
@ -24,7 +25,8 @@ class CameraMetricsTypes(TypedDict):
skipped_fps: Synchronized skipped_fps: Synchronized
class RecordMetricsTypes(TypedDict): class FeatureMetricsTypes(TypedDict):
audio_enabled: Synchronized
record_enabled: Synchronized record_enabled: Synchronized

226
frigate/util/builtin.py Normal file
View File

@ -0,0 +1,226 @@
"""Utilities for builtin types manipulation."""
import copy
import ctypes
import datetime
import logging
import multiprocessing
import re
import shlex
import time
import urllib.parse
from collections import Counter
from collections.abc import Mapping
from queue import Empty, Full
from typing import Any, Tuple
import pytz
import yaml
from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT
from faster_fifo import Queue as FFQueue
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
logger = logging.getLogger(__name__)
class EventsPerSecond:
def __init__(self, max_events=1000, last_n_seconds=10):
self._start = None
self._max_events = max_events
self._last_n_seconds = last_n_seconds
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
def update(self):
now = datetime.datetime.now().timestamp()
if self._start is None:
self._start = now
self._timestamps.append(now)
# truncate the list when it goes 100 over the max_size
if len(self._timestamps) > self._max_events + 100:
self._timestamps = self._timestamps[(1 - self._max_events) :]
self.expire_timestamps(now)
def eps(self):
now = datetime.datetime.now().timestamp()
if self._start is None:
self._start = now
# compute the (approximate) events in the last n seconds
self.expire_timestamps(now)
seconds = min(now - self._start, self._last_n_seconds)
# avoid divide by zero
if seconds == 0:
seconds = 1
return len(self._timestamps) / seconds
# remove aged out timestamps
def expire_timestamps(self, now):
threshold = now - self._last_n_seconds
while self._timestamps and self._timestamps[0] < threshold:
del self._timestamps[0]
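# Usage sketch: measure the rate of a loop over the trailing window.
#
#   eps = EventsPerSecond(last_n_seconds=10)
#   eps.start()
#   for _ in range(100):
#       eps.update()
#   print(round(eps.eps(), 1))  # events per second over the last 10 s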
class LimitedQueue(FFQueue):
def __init__(
self,
maxsize=0,
max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE,
loads=None,
dumps=None,
):
super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps)
self.maxsize = maxsize
self.size = multiprocessing.RawValue(
ctypes.c_int, 0
) # Add a counter for the number of items in the queue
def put(self, x, block=True, timeout=DEFAULT_TIMEOUT):
if self.maxsize > 0 and self.size.value >= self.maxsize:
if block:
start_time = time.time()
while self.size.value >= self.maxsize:
remaining = timeout - (time.time() - start_time)
if remaining <= 0.0:
raise Full
time.sleep(min(remaining, 0.1))
else:
raise Full
self.size.value += 1
return super().put(x, block=block, timeout=timeout)
def get(self, block=True, timeout=DEFAULT_TIMEOUT):
if self.size.value <= 0 and not block:
raise Empty
self.size.value -= 1
return super().get(block=block, timeout=timeout)
def qsize(self):
return self.size.value
def empty(self):
return self.qsize() == 0
def full(self):
return self.qsize() == self.maxsize
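# Usage sketch: a faster_fifo queue bounded by item count. put() raises
# queue.Full once maxsize items are outstanding instead of growing the
# underlying ring buffer.
#
#   q = LimitedQueue(maxsize=2)
#   q.put("a")
#   q.put("b")
#   q.put("c", block=False)  # raises Full
#   q.get()                  # -> "a"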
def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
"""
:param dct1: First dict to merge
:param dct2: Second dict to merge
:param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True)
:return: The merge dictionary
"""
merged = copy.deepcopy(dct1)
for k, v2 in dct2.items():
if k in merged:
v1 = merged[k]
if isinstance(v1, dict) and isinstance(v2, Mapping):
merged[k] = deep_merge(v1, v2, override)
elif isinstance(v1, list) and isinstance(v2, list):
if merge_lists:
merged[k] = v1 + v2
else:
if override:
merged[k] = copy.deepcopy(v2)
else:
merged[k] = copy.deepcopy(v2)
return merged
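# Example of the merge semantics: nested mappings merge recursively, while a
# conflicting scalar keeps dct1's value unless override is set.
#
#   deep_merge({"a": {"x": 1}, "b": 1}, {"a": {"y": 2}, "b": 2})
#   # -> {"a": {"x": 1, "y": 2}, "b": 1}
#   deep_merge({"b": 1}, {"b": 2}, override=True)
#   # -> {"b": 2}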
def load_config_with_no_duplicates(raw_config) -> dict:
"""Get config ensuring duplicate keys are not allowed."""
# https://stackoverflow.com/a/71751051
class PreserveDuplicatesLoader(yaml.loader.Loader):
pass
def map_constructor(loader, node, deep=False):
keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
key_count = Counter(keys)
data = {}
for key, val in zip(keys, vals):
if key_count[key] > 1:
raise ValueError(
f"Config input {key} is defined multiple times for the same field, this is not allowed."
)
else:
data[key] = val
return data
PreserveDuplicatesLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
)
return yaml.load(raw_config, PreserveDuplicatesLoader)
def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line."""
if "rtsp://" in line:
return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
else:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
def escape_special_characters(path: str) -> str:
"""Cleans reserved characters to encodings for ffmpeg."""
try:
found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
pw = found[(found.index(":") + 1) :]
return path.replace(pw, urllib.parse.quote_plus(pw))
except AttributeError:
# path does not have user:pass
return path
def get_ffmpeg_arg_list(arg: Any) -> list:
"""Use arg if list or convert to list format."""
return arg if isinstance(arg, list) else shlex.split(arg)
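# e.g. get_ffmpeg_arg_list("-rtsp_transport tcp") -> ["-rtsp_transport", "tcp"],
# while a list passes through unchanged; shlex.split also respects quoting.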
def load_labels(path, encoding="utf-8"):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, "r", encoding=encoding) as f:
labels = {index: "unknown" for index in range(91)}
lines = f.readlines()
if not lines:
return {}
if lines[0].split(" ", maxsplit=1)[0].isdigit():
pairs = [line.split(" ", maxsplit=1) for line in lines]
labels.update({int(index): label.strip() for index, label in pairs})
else:
labels.update({index: line.strip() for index, line in enumerate(lines)})
return labels
def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
seconds_offset = (
datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
)
hours_offset = int(seconds_offset / 60 / 60)
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
hour_modifier = f"{hours_offset} hour"
minute_modifier = f"{minutes_offset} minute"
return hour_modifier, minute_modifier
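# e.g. get_tz_modifiers("America/New_York") during DST -> ("-4 hour", "0 minute"),
# strings shaped for use as SQLite datetime() modifiers.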
def to_relative_box(
width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[float, float, float, float]:
return (
box[0] / width, # x
box[1] / height, # y
(box[2] - box[0]) / width, # w
(box[3] - box[1]) / height, # h
)
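# e.g. a 100x50 absolute box at (10, 20) in a 1920x1080 frame:
#   to_relative_box(1920, 1080, (10, 20, 110, 70))
#   # -> (~0.0052, ~0.0185, ~0.0521, ~0.0463) as (x, y, w, h)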

663
frigate/util.py → frigate/util/image.py Executable file → Normal file
View File

@ -1,84 +1,17 @@
import copy """Utilities for creating and manipulating image frames."""
import datetime import datetime
import json
import logging import logging
import os
import re
import shlex
import signal
import subprocess as sp
import traceback
import urllib.parse
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory from multiprocessing import shared_memory
from typing import Any, AnyStr, Optional, Tuple from typing import AnyStr, Optional
import cv2 import cv2
import numpy as np import numpy as np
import psutil
import py3nvml.py3nvml as nvml
import pytz
import yaml
from ruamel.yaml import YAML
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
"""
:param dct1: First dict to merge
:param dct2: Second dict to merge
:param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True)
:return: The merge dictionary
"""
merged = copy.deepcopy(dct1)
    for k, v2 in dct2.items():
        if k in merged:
            v1 = merged[k]
            if isinstance(v1, dict) and isinstance(v2, Mapping):
                merged[k] = deep_merge(v1, v2, override)
            elif isinstance(v1, list) and isinstance(v2, list):
                if merge_lists:
                    merged[k] = v1 + v2
            else:
                if override:
                    merged[k] = copy.deepcopy(v2)
        else:
            merged[k] = copy.deepcopy(v2)
    return merged
def load_config_with_no_duplicates(raw_config) -> dict:
"""Get config ensuring duplicate keys are not allowed."""
# https://stackoverflow.com/a/71751051
class PreserveDuplicatesLoader(yaml.loader.Loader):
pass
def map_constructor(loader, node, deep=False):
keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
key_count = Counter(keys)
data = {}
for key, val in zip(keys, vals):
if key_count[key] > 1:
raise ValueError(
f"Config input {key} is defined multiple times for the same field, this is not allowed."
)
else:
data[key] = val
return data
PreserveDuplicatesLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
)
return yaml.load(raw_config, PreserveDuplicatesLoader)
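
A sketch of the failure mode this guards against (plain YAML would silently keep the last duplicate):

raw = """
cameras:
  front: {}
  front: {}
"""
load_config_with_no_duplicates(raw)
# ValueError: Config input front is defined multiple times for the same field, this is not allowed.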
def draw_timestamp(
    frame,
    timestamp,
@ -640,424 +573,6 @@ def clipped(obj, frame_shape):
    return False
def restart_frigate():
proc = psutil.Process(1)
# if this is running via s6, sigterm pid 1
if proc.name() == "s6-svscan":
proc.terminate()
# otherwise, just try and exit frigate
else:
os.kill(os.getpid(), signal.SIGTERM)
class EventsPerSecond:
def __init__(self, max_events=1000):
self._start = None
self._max_events = max_events
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
def update(self):
if self._start is None:
self.start()
self._timestamps.append(datetime.datetime.now().timestamp())
# truncate the list when it goes 100 over the max_size
if len(self._timestamps) > self._max_events + 100:
self._timestamps = self._timestamps[(1 - self._max_events) :]
def eps(self, last_n_seconds=10):
if self._start is None:
self.start()
# compute the (approximate) events in the last n seconds
now = datetime.datetime.now().timestamp()
seconds = min(now - self._start, last_n_seconds)
# avoid divide by zero
if seconds == 0:
seconds = 1
return (
len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
)
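
A usage sketch for EventsPerSecond:

fps = EventsPerSecond()
fps.start()
fps.update()      # record one event per processed frame
rate = fps.eps()  # events in the last 10s divided by the elapsed window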
def print_stack(sig, frame):
traceback.print_stack(frame)
def listen():
signal.signal(signal.SIGUSR1, print_stack)
def create_mask(frame_shape, mask):
mask_img = np.zeros(frame_shape, np.uint8)
mask_img[:] = 255
if isinstance(mask, list):
for m in mask:
add_mask(m, mask_img)
elif isinstance(mask, str):
add_mask(mask, mask_img)
return mask_img
def add_mask(mask, mask_img):
points = mask.split(",")
contour = np.array(
[[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
)
cv2.fillPoly(mask_img, pts=[contour], color=(0))
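
A sketch: a mask is a flat "x,y,x,y,..." point string, and the polygon is filled with 0 on an all-255 canvas the shape of the frame (points illustrative):

mask_img = create_mask((720, 1280), "0,0,200,0,200,100,0,100")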
def load_labels(path, encoding="utf-8"):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, "r", encoding=encoding) as f:
labels = {index: "unknown" for index in range(91)}
lines = f.readlines()
if not lines:
return {}
if lines[0].split(" ", maxsplit=1)[0].isdigit():
pairs = [line.split(" ", maxsplit=1) for line in lines]
labels.update({int(index): label.strip() for index, label in pairs})
else:
labels.update({index: line.strip() for index, line in enumerate(lines)})
return labels
def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line."""
if "rtsp://" in line:
return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
else:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
def escape_special_characters(path: str) -> str:
"""Cleans reserved characters to encodings for ffmpeg."""
try:
found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
pw = found[(found.index(":") + 1) :]
return path.replace(pw, urllib.parse.quote_plus(pw))
except AttributeError:
# path does not have user:pass
return path
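
A sketch with a made-up credential, assuming the RTSP user:pass regex matches through the final "@": reserved characters in the password are percent-encoded so ffmpeg can parse the URL:

escape_special_characters("rtsp://admin:p@ss!word@192.168.1.10:554/live")
# -> "rtsp://admin:p%40ss%21word@192.168.1.10:554/live"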
def get_cgroups_version() -> str:
"""Determine what version of cgroups is enabled."""
cgroup_path = "/sys/fs/cgroup"
if not os.path.ismount(cgroup_path):
logger.debug(f"{cgroup_path} is not a mount point.")
return "unknown"
try:
with open("/proc/mounts", "r") as f:
mounts = f.readlines()
for mount in mounts:
mount_info = mount.split()
if mount_info[1] == cgroup_path:
fs_type = mount_info[2]
if fs_type == "cgroup2fs" or fs_type == "cgroup2":
return "cgroup2"
elif fs_type == "tmpfs":
return "cgroup"
else:
logger.debug(
f"Could not determine cgroups version: unhandled filesystem {fs_type}"
)
break
except Exception as e:
logger.debug(f"Could not determine cgroups version: {e}")
return "unknown"
def get_docker_memlimit_bytes() -> int:
"""Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""
# check running a supported cgroups version
if get_cgroups_version() == "cgroup2":
memlimit_path = "/sys/fs/cgroup/memory.max"
try:
with open(memlimit_path, "r") as f:
value = f.read().strip()
if value.isnumeric():
return int(value)
elif value.lower() == "max":
return -1
except Exception as e:
logger.debug(f"Unable to get docker memlimit: {e}")
return -1
def get_cpu_stats() -> dict[str, dict]:
"""Get cpu usages for each process id"""
usages = {}
docker_memlimit = get_docker_memlimit_bytes() / 1024
total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024
for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
pid = process.info["pid"]
try:
cpu_percent = process.info["cpu_percent"]
cmdline = process.info["cmdline"]
with open(f"/proc/{pid}/stat", "r") as f:
stats = f.readline().split()
utime = int(stats[13])
stime = int(stats[14])
starttime = int(stats[21])
with open("/proc/uptime") as f:
system_uptime_sec = int(float(f.read().split()[0]))
clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
process_utime_sec = utime // clk_tck
process_stime_sec = stime // clk_tck
process_starttime_sec = starttime // clk_tck
process_elapsed_sec = system_uptime_sec - process_starttime_sec
process_usage_sec = process_utime_sec + process_stime_sec
cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
with open(f"/proc/{pid}/statm", "r") as f:
mem_stats = f.readline().split()
mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024
if docker_memlimit > 0:
mem_pct = round((mem_res / docker_memlimit) * 100, 1)
else:
mem_pct = round((mem_res / total_mem) * 100, 1)
usages[pid] = {
"cpu": str(cpu_percent),
"cpu_average": str(round(cpu_average_usage, 2)),
"mem": f"{mem_pct}",
"cmdline": " ".join(cmdline),
}
except Exception:
continue
return usages
def get_physical_interfaces(interfaces) -> list:
with open("/proc/net/dev", "r") as file:
lines = file.readlines()
physical_interfaces = []
for line in lines:
if ":" in line:
interface = line.split(":")[0].strip()
for int in interfaces:
if interface.startswith(int):
physical_interfaces.append(interface)
return physical_interfaces
def get_bandwidth_stats(config) -> dict[str, dict]:
"""Get bandwidth usages for each ffmpeg process id"""
usages = {}
top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
config.telemetry.network_interfaces
)
p = sp.run(
top_command,
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
return usages
else:
lines = p.stdout.split("\n")
for line in lines:
stats = list(filter(lambda a: a != "", line.strip().split("\t")))
try:
if re.search(
r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
):
process = stats[0].split("/")
usages[process[len(process) - 2]] = {
"bandwidth": round(float(stats[1]) + float(stats[2]), 1),
}
except (IndexError, ValueError):
continue
return usages
def get_amd_gpu_stats() -> dict[str, str]:
"""Get stats using radeontop."""
radeontop_command = ["radeontop", "-d", "-", "-l", "1"]
p = sp.run(
radeontop_command,
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
return None
else:
usages = p.stdout.split(",")
results: dict[str, str] = {}
for hw in usages:
if "gpu" in hw:
results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
elif "vram" in hw:
results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
return results
def get_intel_gpu_stats() -> dict[str, str]:
"""Get stats using intel_gpu_top."""
intel_gpu_top_command = [
"timeout",
"0.5s",
"intel_gpu_top",
"-J",
"-o",
"-",
"-s",
"1",
]
p = sp.run(
intel_gpu_top_command,
encoding="ascii",
capture_output=True,
)
# timeout has a non-zero returncode when timeout is reached
if p.returncode != 124:
logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
return None
else:
reading = "".join(p.stdout.split())
results: dict[str, str] = {}
# render is used for qsv
render = []
for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[14:])
single = packet.get("busy", 0.0)
render.append(float(single))
if render:
render_avg = sum(render) / len(render)
else:
render_avg = 1
# video is used for vaapi
video = []
for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[10:])
single = packet.get("busy", 0.0)
video.append(float(single))
if video:
video_avg = sum(video) / len(video)
else:
video_avg = 1
results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
results["mem"] = "-%"
return results
def try_get_info(f, h, default="N/A"):
try:
v = f(h)
except nvml.NVMLError_NotSupported:
v = default
return v
def get_nvidia_gpu_stats() -> dict[int, dict]:
results = {}
try:
nvml.nvmlInit()
deviceCount = nvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
if util != "N/A":
gpu_util = util.gpu
else:
gpu_util = 0
if meminfo != "N/A":
gpu_mem_util = meminfo.used / meminfo.total * 100
else:
gpu_mem_util = -1
results[i] = {
"name": nvml.nvmlDeviceGetName(handle),
"gpu": gpu_util,
"mem": gpu_mem_util,
}
except Exception:
pass
finally:
return results
def ffprobe_stream(path: str) -> sp.CompletedProcess:
"""Run ffprobe on stream."""
clean_path = escape_special_characters(path)
ffprobe_cmd = [
"ffprobe",
"-timeout",
"1000000",
"-print_format",
"json",
"-show_entries",
"stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
"-loglevel",
"quiet",
clean_path,
]
return sp.run(ffprobe_cmd, capture_output=True)
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
"""Run vainfo."""
ffprobe_cmd = (
["vainfo"]
if not device_name
else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
)
return sp.run(ffprobe_cmd, capture_output=True)
def get_ffmpeg_arg_list(arg: Any) -> list:
"""Use arg if list or convert to list format."""
return arg if isinstance(arg, list) else shlex.split(arg)
class FrameManager(ABC):
    @abstractmethod
    def create(self, name, size) -> AnyStr:
@ -1125,157 +640,23 @@ class SharedMemoryFrameManager(FrameManager):
        del self.shm_store[name]
-def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
-    seconds_offset = (
-        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
-    )
-    hours_offset = int(seconds_offset / 60 / 60)
-    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
-    hour_modifier = f"{hours_offset} hour"
-    minute_modifier = f"{minutes_offset} minute"
-    return hour_modifier, minute_modifier
-def to_relative_box(
-    width: int, height: int, box: Tuple[int, int, int, int]
-) -> Tuple[int, int, int, int]:
-    return (
-        box[0] / width,  # x
-        box[1] / height,  # y
-        (box[2] - box[0]) / width,  # w
-        (box[3] - box[1]) / height,  # h
-    )
+def create_mask(frame_shape, mask):
+    mask_img = np.zeros(frame_shape, np.uint8)
+    mask_img[:] = 255
+    if isinstance(mask, list):
+        for m in mask:
+            add_mask(m, mask_img)
+    elif isinstance(mask, str):
+        add_mask(mask, mask_img)
+    return mask_img
+def add_mask(mask, mask_img):
+    points = mask.split(",")
+    contour = np.array(
+        [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
+    )
+    cv2.fillPoly(mask_img, pts=[contour], color=(0))
def get_video_properties(url, get_duration=False):
def calculate_duration(video: Optional[any]) -> float:
duration = None
if video is not None:
# Get the frames per second (fps) of the video stream
fps = video.get(cv2.CAP_PROP_FPS)
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
if fps and total_frames:
duration = total_frames / fps
# if cv2 failed need to use ffprobe
if duration is None:
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{url}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0 and p.stdout.decode():
duration = float(p.stdout.decode().strip())
else:
duration = -1
return duration
width = height = 0
try:
# Open the video stream
video = cv2.VideoCapture(url)
# Check if the video stream was opened successfully
if not video.isOpened():
video = None
except Exception:
video = None
result = {}
if get_duration:
result["duration"] = calculate_duration(video)
if video is not None:
# Get the width of frames in the video stream
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
# Get the height of frames in the video stream
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Release the video stream
video.release()
result["width"] = round(width)
result["height"] = round(height)
return result
def update_yaml_from_url(file_path, url):
parsed_url = urllib.parse.urlparse(url)
query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)
for key_path_str, new_value_list in query_string.items():
key_path = key_path_str.split(".")
for i in range(len(key_path)):
try:
index = int(key_path[i])
key_path[i] = (key_path[i - 1], index)
key_path.pop(i - 1)
except ValueError:
pass
new_value = new_value_list[0]
update_yaml_file(file_path, key_path, new_value)
def update_yaml_file(file_path, key_path, new_value):
yaml = YAML()
with open(file_path, "r") as f:
data = yaml.load(f)
data = update_yaml(data, key_path, new_value)
with open(file_path, "w") as f:
yaml.dump(data, f)
def update_yaml(data, key_path, new_value):
temp = data
for key in key_path[:-1]:
if isinstance(key, tuple):
if key[0] not in temp:
temp[key[0]] = [{}] * max(1, key[1] + 1)
elif len(temp[key[0]]) <= key[1]:
temp[key[0]] += [{}] * (key[1] - len(temp[key[0]]) + 1)
temp = temp[key[0]][key[1]]
else:
if key not in temp:
temp[key] = {}
temp = temp[key]
last_key = key_path[-1]
if new_value == "":
if isinstance(last_key, tuple):
del temp[last_key[0]][last_key[1]]
else:
del temp[last_key]
else:
if isinstance(last_key, tuple):
if last_key[0] not in temp:
temp[last_key[0]] = [{}] * max(1, last_key[1] + 1)
elif len(temp[last_key[0]]) <= last_key[1]:
temp[last_key[0]] += [{}] * (last_key[1] - len(temp[last_key[0]]) + 1)
temp[last_key[0]][last_key[1]] = new_value
else:
if (
last_key in temp
and isinstance(temp[last_key], dict)
and isinstance(new_value, dict)
):
temp[last_key].update(new_value)
else:
temp[last_key] = new_value
return data
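
A sketch of the key-path convention (hypothetical keys): update_yaml_from_url turns numeric query segments like "zones.0" into (key, index) tuples, so update_yaml indexes lists instead of treating them as dicts:

data = {}
update_yaml(data, ["cameras", "front", ("zones", 0), "name"], "porch")
# -> {"cameras": {"front": {"zones": [{"name": "porch"}]}}}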

frigate/util/services.py Normal file

@ -0,0 +1,403 @@
"""Utilities for services."""
import json
import logging
import os
import re
import signal
import subprocess as sp
import traceback
from typing import Optional
import cv2
import psutil
import py3nvml.py3nvml as nvml
from frigate.util.builtin import escape_special_characters
logger = logging.getLogger(__name__)
def restart_frigate():
proc = psutil.Process(1)
# if this is running via s6, sigterm pid 1
if proc.name() == "s6-svscan":
proc.terminate()
# otherwise, just try and exit frigate
else:
os.kill(os.getpid(), signal.SIGTERM)
def print_stack(sig, frame):
traceback.print_stack(frame)
def listen():
signal.signal(signal.SIGUSR1, print_stack)
def get_cgroups_version() -> str:
"""Determine what version of cgroups is enabled."""
cgroup_path = "/sys/fs/cgroup"
if not os.path.ismount(cgroup_path):
logger.debug(f"{cgroup_path} is not a mount point.")
return "unknown"
try:
with open("/proc/mounts", "r") as f:
mounts = f.readlines()
for mount in mounts:
mount_info = mount.split()
if mount_info[1] == cgroup_path:
fs_type = mount_info[2]
if fs_type == "cgroup2fs" or fs_type == "cgroup2":
return "cgroup2"
elif fs_type == "tmpfs":
return "cgroup"
else:
logger.debug(
f"Could not determine cgroups version: unhandled filesystem {fs_type}"
)
break
except Exception as e:
logger.debug(f"Could not determine cgroups version: {e}")
return "unknown"
def get_docker_memlimit_bytes() -> int:
"""Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""
# check running a supported cgroups version
if get_cgroups_version() == "cgroup2":
memlimit_path = "/sys/fs/cgroup/memory.max"
try:
with open(memlimit_path, "r") as f:
value = f.read().strip()
if value.isnumeric():
return int(value)
elif value.lower() == "max":
return -1
except Exception as e:
logger.debug(f"Unable to get docker memlimit: {e}")
return -1
def get_cpu_stats() -> dict[str, dict]:
"""Get cpu usages for each process id"""
usages = {}
docker_memlimit = get_docker_memlimit_bytes() / 1024
total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024
for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
pid = process.info["pid"]
try:
cpu_percent = process.info["cpu_percent"]
cmdline = process.info["cmdline"]
with open(f"/proc/{pid}/stat", "r") as f:
stats = f.readline().split()
utime = int(stats[13])
stime = int(stats[14])
starttime = int(stats[21])
with open("/proc/uptime") as f:
system_uptime_sec = int(float(f.read().split()[0]))
clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
process_utime_sec = utime // clk_tck
process_stime_sec = stime // clk_tck
process_starttime_sec = starttime // clk_tck
process_elapsed_sec = system_uptime_sec - process_starttime_sec
process_usage_sec = process_utime_sec + process_stime_sec
cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
with open(f"/proc/{pid}/statm", "r") as f:
mem_stats = f.readline().split()
mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024
if docker_memlimit > 0:
mem_pct = round((mem_res / docker_memlimit) * 100, 1)
else:
mem_pct = round((mem_res / total_mem) * 100, 1)
usages[pid] = {
"cpu": str(cpu_percent),
"cpu_average": str(round(cpu_average_usage, 2)),
"mem": f"{mem_pct}",
"cmdline": " ".join(cmdline),
}
except Exception:
continue
return usages
def get_physical_interfaces(interfaces) -> list:
with open("/proc/net/dev", "r") as file:
lines = file.readlines()
physical_interfaces = []
for line in lines:
if ":" in line:
interface = line.split(":")[0].strip()
for int in interfaces:
if interface.startswith(int):
physical_interfaces.append(interface)
return physical_interfaces
def get_bandwidth_stats(config) -> dict[str, dict]:
"""Get bandwidth usages for each ffmpeg process id"""
usages = {}
top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
config.telemetry.network_interfaces
)
p = sp.run(
top_command,
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
return usages
else:
lines = p.stdout.split("\n")
for line in lines:
stats = list(filter(lambda a: a != "", line.strip().split("\t")))
try:
if re.search(
r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
):
process = stats[0].split("/")
usages[process[len(process) - 2]] = {
"bandwidth": round(float(stats[1]) + float(stats[2]), 1),
}
except (IndexError, ValueError):
continue
return usages
def get_amd_gpu_stats() -> dict[str, str]:
"""Get stats using radeontop."""
radeontop_command = ["radeontop", "-d", "-", "-l", "1"]
p = sp.run(
radeontop_command,
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
return None
else:
usages = p.stdout.split(",")
results: dict[str, str] = {}
for hw in usages:
if "gpu" in hw:
results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
elif "vram" in hw:
results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
return results
def get_intel_gpu_stats() -> dict[str, str]:
"""Get stats using intel_gpu_top."""
intel_gpu_top_command = [
"timeout",
"0.5s",
"intel_gpu_top",
"-J",
"-o",
"-",
"-s",
"1",
]
p = sp.run(
intel_gpu_top_command,
encoding="ascii",
capture_output=True,
)
# timeout has a non-zero returncode when timeout is reached
if p.returncode != 124:
logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
return None
else:
reading = "".join(p.stdout.split())
results: dict[str, str] = {}
# render is used for qsv
render = []
for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[14:])
single = packet.get("busy", 0.0)
render.append(float(single))
if render:
render_avg = sum(render) / len(render)
else:
render_avg = 1
# video is used for vaapi
video = []
for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading):
packet = json.loads(result[10:])
single = packet.get("busy", 0.0)
video.append(float(single))
if video:
video_avg = sum(video) / len(video)
else:
video_avg = 1
results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
results["mem"] = "-%"
return results
def try_get_info(f, h, default="N/A"):
try:
v = f(h)
except nvml.NVMLError_NotSupported:
v = default
return v
def get_nvidia_gpu_stats() -> dict[int, dict]:
results = {}
try:
nvml.nvmlInit()
deviceCount = nvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
if util != "N/A":
gpu_util = util.gpu
else:
gpu_util = 0
if meminfo != "N/A":
gpu_mem_util = meminfo.used / meminfo.total * 100
else:
gpu_mem_util = -1
results[i] = {
"name": nvml.nvmlDeviceGetName(handle),
"gpu": gpu_util,
"mem": gpu_mem_util,
}
except Exception:
pass
finally:
return results
def ffprobe_stream(path: str) -> sp.CompletedProcess:
"""Run ffprobe on stream."""
clean_path = escape_special_characters(path)
ffprobe_cmd = [
"ffprobe",
"-timeout",
"1000000",
"-print_format",
"json",
"-show_entries",
"stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
"-loglevel",
"quiet",
clean_path,
]
return sp.run(ffprobe_cmd, capture_output=True)
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
"""Run vainfo."""
ffprobe_cmd = (
["vainfo"]
if not device_name
else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
)
return sp.run(ffprobe_cmd, capture_output=True)
def get_video_properties(url, get_duration=False):
def calculate_duration(video: Optional[any]) -> float:
duration = None
if video is not None:
# Get the frames per second (fps) of the video stream
fps = video.get(cv2.CAP_PROP_FPS)
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
if fps and total_frames:
duration = total_frames / fps
# if cv2 failed need to use ffprobe
if duration is None:
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{url}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0 and p.stdout.decode():
duration = float(p.stdout.decode().strip())
else:
duration = -1
return duration
width = height = 0
try:
# Open the video stream
video = cv2.VideoCapture(url)
# Check if the video stream was opened successfully
if not video.isOpened():
video = None
except Exception:
video = None
result = {}
if get_duration:
result["duration"] = calculate_duration(video)
if video is not None:
# Get the width of frames in the video stream
width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
# Get the height of frames in the video stream
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Release the video stream
video.release()
result["width"] = round(width)
result["height"] = round(height)
return result
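
A usage sketch (hypothetical file, illustrative values); duration falls back to ffprobe when OpenCV cannot read fps or frame count:

get_video_properties("/media/frigate/recordings/example.mp4", get_duration=True)
# -> {"duration": 10.0, "width": 1280, "height": 720}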


@ -11,11 +11,12 @@ import time
from collections import defaultdict
import cv2
+import faster_fifo as ff
import numpy as np
from setproctitle import setproctitle
-from frigate.config import CameraConfig, DetectConfig
+from frigate.config import CameraConfig, DetectConfig, ModelConfig
-from frigate.const import CACHE_DIR
+from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
from frigate.detectors.detector_config import PixelFormatEnum
from frigate.log import LogPipe
from frigate.motion import MotionDetector
@ -23,8 +24,8 @@ from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection import RemoteObjectDetector
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
-from frigate.util import (
-    EventsPerSecond,
+from frigate.util.builtin import EventsPerSecond
+from frigate.util.image import (
    FrameManager,
    SharedMemoryFrameManager,
    area,
@ -32,11 +33,11 @@ from frigate.util import (
    draw_box_with_label,
    intersection,
    intersection_over_union,
-    listen,
    yuv_region_2_bgr,
    yuv_region_2_rgb,
    yuv_region_2_yuv,
)
+from frigate.util.services import listen
logger = logging.getLogger(__name__)
@ -95,7 +96,17 @@ def filtered(obj, objects_to_track, object_filters):
    return False
-def create_tensor_input(frame, model_config, region):
+def get_min_region_size(model_config: ModelConfig) -> int:
+    """Get the min region size and ensure it is divisible by 4."""
+    half = int(max(model_config.height, model_config.width) / 2)
+    if half % 4 == 0:
+        return half
+    return int((half + 3) / 4) * 4
+def create_tensor_input(frame, model_config: ModelConfig, region):
    if model_config.input_pixel_format == PixelFormatEnum.rgb:
        cropped_frame = yuv_region_2_rgb(frame, region)
    elif model_config.input_pixel_format == PixelFormatEnum.bgr:
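
A worked example for the get_min_region_size helper added above:

int(max(320, 320) / 2)  # 160, returned as-is since 160 % 4 == 0
int((150 + 3) / 4) * 4  # 152, the rounded-up result for a 300x300 model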
@ -172,7 +183,7 @@ def capture_frames(
    skipped_eps.start()
    while True:
        fps.value = frame_rate.eps()
-        skipped_eps.eps()
+        skipped_fps.value = skipped_eps.eps()
        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{camera_name}{current_frame.value}"
@ -195,17 +206,16 @@ def capture_frames(
        frame_rate.update()
-        # if the queue is full, skip this frame
-        if frame_queue.full():
-            skipped_eps.update()
-            frame_manager.delete(frame_name)
-            continue
-        # close the frame
-        frame_manager.close(frame_name)
-        # add to the queue
-        frame_queue.put(current_frame.value)
+        # don't lock the queue to check, just try since it should rarely be full
+        try:
+            # add to the queue
+            frame_queue.put(current_frame.value, False)
+            # close the frame
+            frame_manager.close(frame_name)
+        except queue.Full:
+            # if the queue is full, skip this frame
+            skipped_eps.update()
+            frame_manager.delete(frame_name)
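
The hunk above replaces a check-then-act frame_queue.full() test with the EAFP idiom; a self-contained sketch of the same pattern with the standard library:

import queue

q = queue.Queue(maxsize=1)
q.put("a")
try:
    q.put("b", False)  # non-blocking put; no separate full() check that could race
except queue.Full:
    pass               # drop the item, as capture_frames drops the frame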
class CameraWatchdog(threading.Thread):
@ -215,6 +225,7 @@ class CameraWatchdog(threading.Thread):
        config: CameraConfig,
        frame_queue,
        camera_fps,
+        skipped_fps,
        ffmpeg_pid,
        stop_event,
    ):
@ -227,11 +238,13 @@ class CameraWatchdog(threading.Thread):
        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")
        self.ffmpeg_other_processes: list[dict[str, any]] = []
        self.camera_fps = camera_fps
+        self.skipped_fps = skipped_fps
        self.ffmpeg_pid = ffmpeg_pid
        self.frame_queue = frame_queue
        self.frame_shape = self.config.frame_shape_yuv
        self.frame_size = self.frame_shape[0] * self.frame_shape[1]
        self.stop_event = stop_event
+        self.sleeptime = self.config.ffmpeg.retry_interval
    def run(self):
        self.start_ffmpeg_detect()
@ -251,8 +264,8 @@ class CameraWatchdog(threading.Thread):
            }
        )
-        time.sleep(10)
-        while not self.stop_event.wait(10):
+        time.sleep(self.sleeptime)
+        while not self.stop_event.wait(self.sleeptime):
            now = datetime.datetime.now().timestamp()
            if not self.capture_thread.is_alive():
@ -346,6 +359,7 @@ class CameraWatchdog(threading.Thread):
                self.frame_shape,
                self.frame_queue,
                self.camera_fps,
+                self.skipped_fps,
                self.stop_event,
            )
            self.capture_thread.start()
@ -376,7 +390,14 @@ class CameraWatchdog(threading.Thread):
class CameraCapture(threading.Thread):
    def __init__(
-        self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
+        self,
+        camera_name,
+        ffmpeg_process,
+        frame_shape,
+        frame_queue,
+        fps,
+        skipped_fps,
+        stop_event,
    ):
        threading.Thread.__init__(self)
        self.name = f"capture:{camera_name}"
@ -385,14 +406,13 @@ class CameraCapture(threading.Thread):
        self.frame_queue = frame_queue
        self.fps = fps
        self.stop_event = stop_event
-        self.skipped_fps = EventsPerSecond()
+        self.skipped_fps = skipped_fps
        self.frame_manager = SharedMemoryFrameManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = mp.Value("d", 0.0)
        self.last_frame = 0
    def run(self):
-        self.skipped_fps.start()
        capture_frames(
            self.ffmpeg_process,
            self.camera_name,
@ -424,6 +444,7 @@ def capture_camera(name, config: CameraConfig, process_info):
            config,
            frame_queue,
            process_info["camera_fps"],
+            process_info["skipped_fps"],
            process_info["ffmpeg_pid"],
            stop_event,
        )
@ -706,15 +727,15 @@ def get_consolidated_object_detections(detected_object_groups):
def process_frames(
    camera_name: str,
-    frame_queue: mp.Queue,
+    frame_queue: ff.Queue,
    frame_shape,
-    model_config,
+    model_config: ModelConfig,
    detect_config: DetectConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
-    detected_objects_queue: mp.Queue,
+    detected_objects_queue: ff.Queue,
    process_info: dict,
    objects_to_track: list[str],
    object_filters,
@ -723,14 +744,6 @@ def process_frames(
    stop_event,
    exit_on_empty: bool = False,
):
-    # attribute labels are not tracked and are not assigned regions
-    attribute_label_map = {
-        "person": ["face", "amazon"],
-        "car": ["ups", "fedex", "amazon", "license_plate"],
-    }
-    all_attribute_labels = [
-        item for sublist in attribute_label_map.values() for item in sublist
-    ]
    fps = process_info["process_fps"]
    detection_fps = process_info["detection_fps"]
    current_frame_time = process_info["detection_frame"]
@ -740,16 +753,18 @@ def process_frames(
    startup_scan_counter = 0
-    region_min_size = int(max(model_config.height, model_config.width) / 2)
+    region_min_size = get_min_region_size(model_config)
    while not stop_event.is_set():
-        if exit_on_empty and frame_queue.empty():
-            logger.info("Exiting track_objects...")
-            break
        try:
-            frame_time = frame_queue.get(True, 1)
+            if exit_on_empty:
+                frame_time = frame_queue.get(False)
+            else:
+                frame_time = frame_queue.get(True, 1)
        except queue.Empty:
+            if exit_on_empty:
+                logger.info("Exiting track_objects...")
+                break
            continue
        current_frame_time.value = frame_time
@ -906,7 +921,7 @@ def process_frames(
        tracked_detections = [
            d
            for d in consolidated_detections
-            if d[0] not in all_attribute_labels
+            if d[0] not in ALL_ATTRIBUTE_LABELS
        ]
        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, tracked_detections)
@ -916,7 +931,7 @@ def process_frames(
        # group the attribute detections based on what label they apply to
        attribute_detections = {}
-        for label, attribute_labels in attribute_label_map.items():
+        for label, attribute_labels in ATTRIBUTE_LABEL_MAP.items():
            attribute_detections[label] = [
                d for d in consolidated_detections if d[0] in attribute_labels
            ]


@ -5,7 +5,7 @@ import time
from multiprocessing.synchronize import Event as MpEvent
from frigate.object_detection import ObjectDetectProcess
-from frigate.util import restart_frigate
+from frigate.util.services import restart_frigate
logger = logging.getLogger(__name__)
@ -24,7 +24,9 @@ class FrigateWatchdog(threading.Thread):
        # check the detection processes
        for detector in self.detectors.values():
-            detection_start = detector.detection_start.value
+            detection_start = detector.detection_start.value  # type: ignore[attr-defined]
+            # issue https://github.com/python/typeshed/issues/8799
+            # from mypy 0.981 onwards
            if detection_start > 0.0 and now - detection_start > 10:
                logger.info(
                    "Detection appears to be stuck. Restarting detection process..."


@ -1,14 +1,15 @@
click == 8.1.*
Flask == 2.3.*
+faster-fifo == 1.4.*
imutils == 0.5.*
matplotlib == 3.7.*
-mypy == 0.942
+mypy == 1.4.1
numpy == 1.23.*
onvif_zeep == 0.2.12
-opencv-python-headless == 4.5.5.*
+opencv-python-headless == 4.7.0.*
paho-mqtt == 1.6.*
peewee == 3.16.*
-peewee_migrate == 1.10.*
+peewee_migrate == 1.11.*
psutil == 5.9.*
pydantic == 1.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml


@ -16,6 +16,7 @@ export const handlers = [
    front: {
      name: 'front',
      objects: { track: ['taco', 'cat', 'dog'] },
+      audio: { enabled: false, enabled_in_config: false },
      record: { enabled: true, enabled_in_config: true },
      detect: { width: 1280, height: 720 },
      snapshots: {},
@ -25,6 +26,7 @@ export const handlers = [
    side: {
      name: 'side',
      objects: { track: ['taco', 'cat', 'dog'] },
+      audio: { enabled: false, enabled_in_config: false },
      record: { enabled: false, enabled_in_config: true },
      detect: { width: 1280, height: 720 },
      snapshots: {},


@ -8,7 +8,7 @@
<link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon.png" /> <link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon.png" />
<link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png" /> <link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png" /> <link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png" />
<link rel="icon" type="image/svg+xml" href="/images/favicon.svg"> <link rel="icon" type="image/svg+xml" href="/images/favicon.svg" />
<link rel="manifest" href="/site.webmanifest" /> <link rel="manifest" href="/site.webmanifest" />
<link rel="mask-icon" href="/images/favicon.svg" color="#3b82f7" /> <link rel="mask-icon" href="/images/favicon.svg" color="#3b82f7" />
<meta name="msapplication-TileColor" content="#3b82f7" /> <meta name="msapplication-TileColor" content="#3b82f7" />

web/package-lock.json: diff suppressed (generated file, too large)

@ -38,8 +38,8 @@
"@testing-library/user-event": "^14.4.3", "@testing-library/user-event": "^14.4.3",
"@typescript-eslint/eslint-plugin": "^5.59.1", "@typescript-eslint/eslint-plugin": "^5.59.1",
"@typescript-eslint/parser": "^5.59.1", "@typescript-eslint/parser": "^5.59.1",
"@vitest/coverage-c8": "^0.31.0", "@vitest/coverage-v8": "^0.32.2",
"@vitest/ui": "^0.31.0", "@vitest/ui": "^0.32.2",
"autoprefixer": "^10.4.14", "autoprefixer": "^10.4.14",
"eslint": "^8.39.0", "eslint": "^8.39.0",
"eslint-config-preact": "^1.3.0", "eslint-config-preact": "^1.3.0",
@ -53,6 +53,6 @@
"tailwindcss": "^3.3.2", "tailwindcss": "^3.3.2",
"typescript": "^5.0.4", "typescript": "^5.0.4",
"vite": "^4.3.5", "vite": "^4.3.5",
"vitest": "^0.31.0" "vitest": "^0.32.2"
} }
} }


@ -113,8 +113,8 @@ describe('WsProvider', () => {
    vi.spyOn(Date, 'now').mockReturnValue(123456);
    const config = {
      cameras: {
-        front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true } },
+        front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true }, audio: { enabled: false } },
-        side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false } },
+        side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false }, audio: { enabled: false } },
      },
    };
    render(
render( render(


@ -41,10 +41,11 @@ export function WsProvider({
  useEffect(() => {
    Object.keys(config.cameras).forEach((camera) => {
-      const { name, record, detect, snapshots } = config.cameras[camera];
+      const { name, record, detect, snapshots, audio } = config.cameras[camera];
      dispatch({ topic: `${name}/recordings/state`, payload: record.enabled ? 'ON' : 'OFF', retain: false });
      dispatch({ topic: `${name}/detect/state`, payload: detect.enabled ? 'ON' : 'OFF', retain: false });
      dispatch({ topic: `${name}/snapshots/state`, payload: snapshots.enabled ? 'ON' : 'OFF', retain: false });
+      dispatch({ topic: `${name}/audio/state`, payload: audio.enabled ? 'ON' : 'OFF', retain: false });
    });
  }, [config]);
@ -120,6 +121,15 @@ export function useSnapshotsState(camera) {
  return { payload, send, connected };
}
export function useAudioState(camera) {
const {
value: { payload },
send,
connected,
} = useWs(`${camera}/audio/state`, `${camera}/audio/set`);
return { payload, send, connected };
}
export function usePtzCommand(camera) {
  const {
    value: { payload },


@ -28,13 +28,18 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
  const scaledHeight = useMemo(() => {
    const scaledHeight = Math.floor(availableWidth / aspectRatio);
-    return stretch ? scaledHeight : Math.min(scaledHeight, height);
+    const finalHeight = stretch ? scaledHeight : Math.min(scaledHeight, height);
+    if (finalHeight > 0) {
+      return finalHeight;
+    }
+    return 100;
  }, [availableWidth, aspectRatio, height, stretch]);
-  const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
-    scaledHeight,
-    aspectRatio,
-    scrollBarWidth,
-  ]);
+  const scaledWidth = useMemo(
+    () => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth),
+    [scaledHeight, aspectRatio, scrollBarWidth]
+  );
  const img = useMemo(() => new Image(), []);
  img.onload = useCallback(
@ -58,18 +63,16 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
  return (
    <div className="relative w-full" ref={containerRef}>
-      {
-        (enabled) ?
-          <canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
-          : <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
-      }
-      {
-        (!hasLoaded && enabled) ? (
-          <div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
-            <ActivityIndicator />
-          </div>
-        ) : null
-      }
+      {enabled ? (
+        <canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
+      ) : (
+        <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
+      )}
+      {!hasLoaded && enabled ? (
+        <div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
+          <ActivityIndicator />
+        </div>
+      ) : null}
    </div>
  );
}


@ -25,9 +25,9 @@ const timeAgo = ({ time, currentTime = new Date(), dense = false }: IProp): stri
  const elapsedTime: number = currentTime.getTime() - pastTime.getTime();
  const timeUnits: TimeUnit[] = [
-    { unit: 'ye', full: 'year', value: 31536000 },
+    { unit: 'yr', full: 'year', value: 31536000 },
    { unit: 'mo', full: 'month', value: 0 },
-    { unit: 'day', full: 'day', value: 86400 },
+    { unit: 'd', full: 'day', value: 86400 },
    { unit: 'h', full: 'hour', value: 3600 },
    { unit: 'm', full: 'minute', value: 60 },
    { unit: 's', full: 'second', value: 1 },
@ -58,11 +58,11 @@ const timeAgo = ({ time, currentTime = new Date(), dense = false }: IProp): stri
      if (monthDiff > 0) {
        const unitAmount = monthDiff;
-        return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
+        return `${unitAmount}${dense ? timeUnits[i].unit : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
      }
    } else if (elapsed >= timeUnits[i].value) {
      const unitAmount: number = Math.floor(elapsed / timeUnits[i].value);
-      return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
+      return `${unitAmount}${dense ? timeUnits[i].unit : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
    }
  }
  return 'Invalid Time';


@ -0,0 +1,65 @@
import { Fragment, h } from 'preact';
import { useState } from 'preact/hooks';
export default function TimelineEventOverlay({ eventOverlay, cameraConfig }) {
const boxLeftEdge = Math.round(eventOverlay.data.box[0] * 100);
const boxTopEdge = Math.round(eventOverlay.data.box[1] * 100);
const boxRightEdge = Math.round((1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100);
const boxBottomEdge = Math.round((1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100);
const [isHovering, setIsHovering] = useState(false);
const getHoverStyle = () => {
if (boxLeftEdge < 15) {
// show object stats on right side
return {
left: `${boxLeftEdge + eventOverlay.data.box[2] * 100 + 1}%`,
top: `${boxTopEdge}%`,
};
}
return {
right: `${boxRightEdge + eventOverlay.data.box[2] * 100 + 1}%`,
top: `${boxTopEdge}%`,
};
};
const getObjectArea = () => {
const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
return Math.round(width * height);
};
const getObjectRatio = () => {
const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
return Math.round(100 * (width / height)) / 100;
};
return (
<Fragment>
<div
className="absolute border-4 border-red-600"
onMouseEnter={() => setIsHovering(true)}
onMouseLeave={() => setIsHovering(false)}
onTouchStart={() => setIsHovering(true)}
onTouchEnd={() => setIsHovering(false)}
style={{
left: `${boxLeftEdge}%`,
top: `${boxTopEdge}%`,
right: `${boxRightEdge}%`,
bottom: `${boxBottomEdge}%`,
}}
>
{eventOverlay.class_type == 'entered_zone' ? (
<div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
) : null}
</div>
{isHovering && (
<div className="absolute bg-white dark:bg-slate-800 p-4 block dark:text-white text-lg" style={getHoverStyle()}>
<div>{`Area: ${getObjectArea()} px`}</div>
<div>{`Ratio: ${getObjectRatio()}`}</div>
</div>
)}
</Fragment>
);
}


@ -29,7 +29,7 @@ export default function Tooltip({ relativeTo, text }) {
    let newLeft = left - Math.round(tipWidth / 2);
    // too far right
    if (newLeft + tipWidth + TIP_SPACE > windowWidth - window.scrollX) {
-      newLeft = left - tipWidth - TIP_SPACE;
+      newLeft = Math.max(0, left - tipWidth - TIP_SPACE);
      newTop = top - Math.round(tipHeight / 2);
    }
    // too far left

web/src/icons/Audio.jsx Normal file

@ -0,0 +1,36 @@
import { h } from 'preact';
import { memo } from 'preact/compat';
export function Audio({ className = 'h-6 w-6', stroke = 'currentColor', onClick = () => {} }) {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
className={className}
fill="none"
viewBox="0 0 32 32"
stroke={stroke}
onClick={onClick}
>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M18 30v-2a10.011 10.011 0 0010-10h2a12.013 12.013 0 01-12 12z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M18 22v-2a2.002 2.002 0 002-2h2a4.004 4.004 0 01-4 4zM10 2a9.01 9.01 0 00-9 9h2a7 7 0 0114 0 7.09 7.09 0 01-3.501 6.135l-.499.288v3.073a2.935 2.935 0 01-.9 2.151 4.182 4.182 0 01-4.633 1.03A4.092 4.092 0 015 20H3a6.116 6.116 0 003.67 5.512 5.782 5.782 0 002.314.486 6.585 6.585 0 004.478-1.888A4.94 4.94 0 0015 20.496v-1.942A9.108 9.108 0 0019 11a9.01 9.01 0 00-9-9z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M9.28 8.082A3.006 3.006 0 0113 11h2a4.979 4.979 0 00-1.884-3.911 5.041 5.041 0 00-4.281-.957 4.95 4.95 0 00-3.703 3.703 5.032 5.032 0 002.304 5.458A3.078 3.078 0 019 17.924V20h2v-2.077a5.06 5.06 0 00-2.537-4.346 3.002 3.002 0 01.817-5.494z"
/>
</svg>
);
}
export default memo(Audio);


@ -22,6 +22,7 @@ const emptyObject = Object.freeze({});
export default function Camera({ camera }) {
  const { data: config } = useSWR('config');
+  const { data: trackedLabels } = useSWR(['labels', { camera }]);
  const apiHost = useApiHost();
  const [showSettings, setShowSettings] = useState(false);
  const [viewMode, setViewMode] = useState('live');
@ -121,7 +122,9 @@ export default function Camera({ camera }) {
<div className="max-w-5xl"> <div className="max-w-5xl">
<video-stream <video-stream
mode="mse" mode="mse"
src={new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)} src={
new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)
}
/> />
</div> </div>
</Fragment> </Fragment>
@ -203,7 +206,7 @@ export default function Camera({ camera }) {
<div className="space-y-4"> <div className="space-y-4">
<Heading size="sm">Tracked objects</Heading> <Heading size="sm">Tracked objects</Heading>
<div className="flex flex-wrap justify-start"> <div className="flex flex-wrap justify-start">
{cameraConfig.objects.track.map((objectType) => ( {(trackedLabels || []).map((objectType) => (
<Card <Card
className="mb-4 mr-4" className="mb-4 mr-4"
key={objectType} key={objectType}


@ -2,10 +2,11 @@ import { h, Fragment } from 'preact';
import ActivityIndicator from '../components/ActivityIndicator';
import Card from '../components/Card';
import CameraImage from '../components/CameraImage';
+import AudioIcon from '../icons/Audio';
import ClipIcon from '../icons/Clip';
import MotionIcon from '../icons/Motion';
import SnapshotIcon from '../icons/Snapshot';
-import { useDetectState, useRecordingsState, useSnapshotsState } from '../api/ws';
+import { useAudioState, useDetectState, useRecordingsState, useSnapshotsState } from '../api/ws';
import { useMemo } from 'preact/hooks';
import useSWR from 'swr';
@ -43,6 +44,7 @@ function Camera({ name, config }) {
  const { payload: detectValue, send: sendDetect } = useDetectState(name);
  const { payload: recordValue, send: sendRecordings } = useRecordingsState(name);
  const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
+  const { payload: audioValue, send: sendAudio } = useAudioState(name);
  const href = `/cameras/${name}`;
  const buttons = useMemo(() => {
    return [
@ -50,10 +52,9 @@ function Camera({ name, config }) {
      { name: 'Recordings', href: `/recording/${name}` },
    ];
  }, [name]);
-  const cleanName = useMemo(
-    () => { return `${name.replaceAll('_', ' ')}` },
-    [name]
-  );
+  const cleanName = useMemo(() => {
+    return `${name.replaceAll('_', ' ')}`;
+  }, [name]);
  const icons = useMemo(
    () => [
      {
@ -65,7 +66,9 @@ function Camera({ name, config }) {
        },
      },
      {
-        name: config.record.enabled_in_config ? `Toggle recordings ${recordValue === 'ON' ? 'off' : 'on'}` : 'Recordings must be enabled in the config to be turned on in the UI.',
+        name: config.record.enabled_in_config
+          ? `Toggle recordings ${recordValue === 'ON' ? 'off' : 'on'}`
+          : 'Recordings must be enabled in the config to be turned on in the UI.',
        icon: ClipIcon,
        color: config.record.enabled_in_config ? (recordValue === 'ON' ? 'blue' : 'gray') : 'red',
        onClick: () => {
@ -82,11 +85,27 @@ function Camera({ name, config }) {
          sendSnapshots(snapshotValue === 'ON' ? 'OFF' : 'ON', true);
        },
      },
+      config.audio.enabled_in_config
+        ? {
+            name: `Toggle audio detection ${audioValue === 'ON' ? 'off' : 'on'}`,
+            icon: AudioIcon,
+            color: audioValue === 'ON' ? 'blue' : 'gray',
+            onClick: () => {
+              sendAudio(audioValue === 'ON' ? 'OFF' : 'ON', true);
+            },
+          }
+        : null,
-    ],
-    [config, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
+    ].filter((button) => button != null),
+    [config, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
  );
  return (
-    <Card buttons={buttons} href={href} header={cleanName} icons={icons} media={<CameraImage camera={name} stretch />} />
+    <Card
+      buttons={buttons}
+      href={href}
+      header={cleanName}
+      icons={icons}
+      media={<CameraImage camera={name} stretch />}
+    />
  );
}


@ -29,6 +29,7 @@ import { formatUnixTimestampToDateTime, getDurationFromTimestamps } from '../uti
import TimeAgo from '../components/TimeAgo';
import Timepicker from '../components/TimePicker';
import TimelineSummary from '../components/TimelineSummary';
+import TimelineEventOverlay from '../components/TimelineEventOverlay';
const API_LIMIT = 25;
@ -106,6 +107,7 @@ export default function Events({ path, ...props }) {
  const { data: config } = useSWR('config');
+  const { data: allLabels } = useSWR(['labels']);
  const { data: allSubLabels } = useSWR(['sub_labels', { split_joined: 1 }]);
  const filterValues = useMemo(
@ -120,15 +122,10 @@ export default function Events({ path, ...props }) {
        .filter((value, i, self) => self.indexOf(value) === i),
        'None',
      ],
-      labels: Object.values(config?.cameras || {})
-        .reduce((memo, camera) => {
-          memo = memo.concat(camera?.objects?.track || []);
-          return memo;
-        }, config?.objects?.track || [])
-        .filter((value, i, self) => self.indexOf(value) === i),
+      labels: Object.values(allLabels || {}),
      sub_labels: (allSubLabels || []).length > 0 ? [...Object.values(allSubLabels), 'None'] : [],
    }),
-    [config, allSubLabels]
+    [config, allLabels, allSubLabels]
  );
const onSave = async (e, eventId, save) => { const onSave = async (e, eventId, save) => {
@ -721,23 +718,10 @@ export default function Events({ path, ...props }) {
                  }}
                >
                  {eventOverlay ? (
-                    <div
-                      className="absolute border-4 border-red-600"
-                      style={{
-                        left: `${Math.round(eventOverlay.data.box[0] * 100)}%`,
-                        top: `${Math.round(eventOverlay.data.box[1] * 100)}%`,
-                        right: `${Math.round((1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100)}%`,
-                        bottom: `${Math.round((1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100)}%`,
-                      }}
-                    >
-                      {eventOverlay.class_type == 'entered_zone' ? (
-                        <div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
-                      ) : null}
-                    </div>
+                    <TimelineEventOverlay
+                      eventOverlay={eventOverlay}
+                      cameraConfig={config.cameras[event.camera]}
+                    />
                  ) : null}
                </VideoPlayer>
              </div>


@ -18,9 +18,9 @@ export default function Export() {
  const localISODate = localDate.toISOString().split('T')[0];
  const [startDate, setStartDate] = useState(localISODate);
-  const [startTime, setStartTime] = useState("00:00");
+  const [startTime, setStartTime] = useState('00:00');
  const [endDate, setEndDate] = useState(localISODate);
-  const [endTime, setEndTime] = useState("23:59");
+  const [endTime, setEndTime] = useState('23:59');
  const onHandleExport = () => {
    if (camera == 'select') {
@ -33,8 +33,6 @@ export default function Export() {
      return;
    }
    if (!startDate || !startTime || !endDate || !endTime) {
      setMessage({ text: 'A start and end time needs to be selected', error: true });
      return;
@ -48,12 +46,13 @@ export default function Export() {
      return;
    }
-    axios.post(`export/${camera}/start/${start}/end/${end}`, { playback })
+    axios
+      .post(`export/${camera}/start/${start}/end/${end}`, { playback })
      .then(() => {
        setMessage({ text: 'Successfully started export. View the file in the /exports folder.', error: false });
      })
      .catch((error) => {
-        setMessage({ text: 'Failed to start export: '+error.response.data.message, error: true });
+        setMessage({ text: `Failed to start export: ${error.response.data.message}`, error: true });
      });
  };
@ -93,13 +92,37 @@ export default function Export() {
<Heading className="py-2" size="sm"> <Heading className="py-2" size="sm">
From: From:
</Heading> </Heading>
<input className="dark:bg-slate-800" id="startDate" type="date" value={startDate} onChange={(e) => setStartDate(e.target.value)}/> <input
<input className="dark:bg-slate-800" id="startTime" type="time" value={startTime} onChange={(e) => setStartTime(e.target.value)}/> className="dark:bg-slate-800"
id="startDate"
type="date"
value={startDate}
onChange={(e) => setStartDate(e.target.value)}
/>
<input
className="dark:bg-slate-800"
id="startTime"
type="time"
value={startTime}
onChange={(e) => setStartTime(e.target.value)}
/>
<Heading className="py-2" size="sm"> <Heading className="py-2" size="sm">
To: To:
</Heading> </Heading>
<input className="dark:bg-slate-800" id="endDate" type="date" value={endDate} onChange={(e) => setEndDate(e.target.value)}/> <input
<input className="dark:bg-slate-800" id="endTime" type="time" value={endTime} onChange={(e) => setEndTime(e.target.value)}/> className="dark:bg-slate-800"
id="endDate"
type="date"
value={endDate}
onChange={(e) => setEndDate(e.target.value)}
/>
<input
className="dark:bg-slate-800"
id="endTime"
type="time"
value={endTime}
onChange={(e) => setEndTime(e.target.value)}
/>
</div> </div>
<Button onClick={() => onHandleExport()}>Submit</Button> <Button onClick={() => onHandleExport()}>Submit</Button>
</div> </div>


@ -334,7 +334,7 @@ export default function System() {
            <ActivityIndicator />
          ) : (
            <div data-testid="cameras" className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4">
-              {cameraNames.map((camera) => (
+              {cameraNames.map((camera) => ( config.cameras[camera]["enabled"] && (
                <div key={camera} className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow">
                  <div className="capitalize text-lg flex justify-between p-4">
                    <Link href={`/cameras/${camera}`}>{camera.replaceAll('_', ' ')}</Link>
@ -406,7 +406,7 @@ export default function System() {
                  </Tbody>
                </Table>
              </div>
-            </div>
+            </div> )
              ))}
            </div>
          )}