Merge branch 'dev' of https://github.com/blakeblackshear/frigate into advancedoptionsui
commit f5334f499f
Dockerfile (19 changed lines)
@@ -18,10 +18,13 @@ WORKDIR /rootfs

 FROM base AS nginx

 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G

 # bind /var/cache/apt to tmpfs to speed up nginx build
 RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
     --mount=type=bind,source=docker/build_nginx.sh,target=/deps/build_nginx.sh \
+    --mount=type=cache,target=/root/.ccache \
     /deps/build_nginx.sh

 FROM wget AS go2rtc

@@ -61,14 +64,16 @@ RUN mkdir /models \

 FROM wget as libusb-build

 ARG TARGETARCH
 ARG DEBIAN_FRONTEND
+ENV CCACHE_DIR /root/.ccache
+ENV CCACHE_MAXSIZE 2G

 # Build libUSB without udev. Needed for Openvino NCS2 support
 WORKDIR /opt
-RUN apt-get update && apt-get install -y unzip build-essential automake libtool
-RUN wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
+RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache
+RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \
     unzip v1.0.25.zip && cd libusb-1.0.25 && \
     ./bootstrap.sh && \
-    ./configure --disable-udev --enable-shared && \
+    ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \
     make -j $(nproc --all)
 RUN apt-get update && \
     apt-get install -y --no-install-recommends libusb-1.0-0-dev && \

@@ -93,7 +98,9 @@ COPY labelmap.txt .
 COPY --from=ov-converter /models/public/ssdlite_mobilenet_v2/FP16 openvino-model
 RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
     sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt

+# Get Audio Model and labels
+RUN wget -qO cpu_audio_model.tflite https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1?lite-format=tflite
+COPY audio-labelmap.txt .

 FROM wget AS s6-overlay

@@ -127,7 +134,9 @@ RUN apt-get -qq update \
     libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
     libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
     # scipy dependencies
-    gcc gfortran libopenblas-dev liblapack-dev && \
+    gcc gfortran libopenblas-dev liblapack-dev \
+    # faster-fifo dependencies
+    g++ cython3 && \
     rm -rf /var/lib/apt/lists/*

 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
audio-labelmap.txt (new file, 521 lines)
@@ -0,0 +1,521 @@
speech
speech
speech
speech
babbling
speech
yell
bellow
whoop
yell
yell
yell
whispering
laughter
laughter
laughter
snicker
laughter
laughter
crying
crying
crying
yell
sigh
singing
choir
sodeling
chant
mantra
child_singing
synthetic_singing
rapping
humming
groan
grunt
whistling
breathing
wheeze
snoring
gasp
pant
snort
cough
throat_clearing
sneeze
sniff
run
shuffle
footsteps
chewing
biting
gargling
stomach_rumble
burping
hiccup
fart
hands
finger_snapping
clapping
heartbeat
heart_murmur
cheering
applause
chatter
crowd
speech
children_playing
animal
pets
dog
bark
yip
howl
bow-wow
growling
whimper_dog
cat
purr
meow
hiss
caterwaul
livestock
horse
clip-clop
neigh
cattle
moo
cowbell
pig
oink
goat
bleat
sheep
fowl
chicken
cluck
cock-a-doodle-doo
turkey
gobble
duck
quack
goose
honk
wild_animals
roaring_cats
roar
bird
chird
chirp
squawk
pigeon
coo
crow
caw
owl
hoot
flapping_wings
dogs
rats
mouse
patter
insect
cricket
mosquito
fly
buzz
buzz
frog
croak
snake
rattle
whale_vocalization
music
musical_instrument
plucked_string_instrument
guitar
electric_guitar
bass_guitar
acoustic_guitar
steel_guitar
tapping
strum
banjo
sitar
mandolin
zither
ukulele
keyboard
piano
electric_piano
organ
electronic_organ
hammond_organ
synthesizer
sampler
harpsichord
percussion
drum_kit
drum_machine
drum
snare_drum
rimshot
drum_roll
bass_drum
timpani
tabla
cymbal
hi-hat
wood_block
tambourine
rattle
maraca
gong
tubular_bells
mallet_percussion
marimba
glockenspiel
vibraphone
steelpan
orchestra
brass_instrument
french_horn
trumpet
trombone
bowed_string_instrument
string_section
violin
pizzicato
cello
double_bass
wind_instrument
flute
saxophone
clarinet
harp
bell
church_bell
jingle_bell
bicycle_bell
tuning_fork
chime
wind_chime
change_ringing
harmonica
accordion
bagpipes
didgeridoo
shofar
theremin
singing_bowl
scratching
pop_music
hip_hop_music
beatboxing
rock_music
heavy_metal
punk_rock
grunge
progressive_rock
rock_and_roll
psychedelic_rock
rhythm_and_blues
soul_music
reggae
country
swing_music
bluegrass
funk
folk_music
middle_eastern_music
jazz
disco
classical_music
opera
electronic_music
house_music
techno
dubstep
drum_and_bass
electronica
electronic_dance_music
ambient_music
trance_music
music_of_latin_america
salsa_music
flamenco
blues
music_for_children
new-age_music
vocal_music
a_capella
music_of_africa
afrobeat
christian_music
gospel_music
music_of_asia
carnatic_music
music_of_bollywood
ska
traditional_music
independent_music
song
background_music
theme_music
jingle
soundtrack_music
lullaby
video_game_music
christmas_music
dance_music
wedding_music
happy_music
sad_music
tender_music
exciting_music
angry_music
scary_music
wind
rustling_leaves
wind_noise
thunderstorm
thunder
water
rain
raindrop
rain_on_surface
stream
waterfall
ocean
waves
steam
gurgling
fire
crackle
vehicle
boat
sailboat
rowboat
motorboat
ship
motor_vehicle
car
honk
toot
car_alarm
power_windows
skidding
tire_squeal
car_passing_by
race_car
truck
air_brake
air_horn
reversing_beeps
ice_cream_truck
bus
emergency_vehicle
police_car
ambulance
fire_engine
motorcycle
traffic_noise
rail_transport
train
train_whistle
train_horn
railroad_car
train_wheels_squealing
subway
aircraft
aircraft_engine
jet_engine
propeller
helicopter
fixed-wing_aircraft
bicycle
skateboard
engine
light_engine
dental_drill's_drill
lawn_mower
chainsaw
medium_engine
heavy_engine
engine_knocking
engine_starting
idling
accelerating
door
doorbell
ding-dong
sliding_door
slam
knock
tap
squeak
cupboard_open_or_close
drawer_open_or_close
dishes
cutlery
chopping
frying
microwave_oven
blender
water_tap
sink
bathtub
hair_dryer
toilet_flush
toothbrush
electric_toothbrush
vacuum_cleaner
zipper
keys_jangling
coin
scissors
electric_shaver
shuffling_cards
typing
typewriter
computer_keyboard
writing
alarm
telephone
telephone_bell_ringing
ringtone
telephone_dialing
dial_tone
busy_signal
alarm_clock
siren
civil_defense_siren
buzzer
smoke_detector
fire_alarm
foghorn
whistle
steam_whistle
mechanisms
ratchet
clock
tick
tick-tock
gears
pulleys
sewing_machine
mechanical_fan
air_conditioning
cash_register
printer
camera
single-lens_reflex_camera
tools
hammer
jackhammer
sawing
filing
sanding
power_tool
drill
explosion
gunshot
machine_gun
fusillade
artillery_fire
cap_gun
fireworks
firecracker
burst
eruption
boom
wood
chop
splinter
crack
glass
chink
shatter
liquid
splash
slosh
squish
drip
pour
trickle
gush
fill
spray
pump
stir
boiling
sonar
arrow
whoosh
thump
thunk
electronic_tuner
effects_unit
chorus_effect
basketball_bounce
bang
slap
whack
smash
breaking
bouncing
whip
flap
scratch
scrape
rub
roll
crushing
crumpling
tearing
beep
ping
ding
clang
squeal
creak
rustle
whir
clatter
sizzle
clicking
clickety-clack
rumble
plop
jingle
hum
zing
boing
crunch
silence
sine_wave
harmonic
chirp_tone
sound_effect
pulse
inside
inside
inside
outside
outside
reverberation
echo
noise
environmental_noise
static
mains_hum
distortion
sidetone
cacophony
white_noise
pink_noise
throbbing
vibration
television
radio
field_recording
@@ -12,16 +12,32 @@ from frigate.util import create_mask

 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
 # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
-cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
+cap = cv2.VideoCapture("debug/motion_test_clips/lawn_mower_night_1.mp4")
 # cap = cv2.VideoCapture("airport.mp4")
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)
 frame_shape = (height, width, 3)
+# Nick back:
+# "1280,0,1280,316,1170,216,1146,126,1016,127,979,82,839,0",
+# "310,350,300,402,224,405,241,354",
+# "378,0,375,26,0,23,0,0",
+# Front door:
+# "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
+# "336,833,438,1024,346,1093,103,1052,24,814",
+# Back
+# "1855,0,1851,100,1289,96,1105,161,1045,119,890,121,890,0",
+# "505,95,506,138,388,153,384,114",
+# "689,72,689,122,549,134,547,89",
+# "261,134,264,176,169,195,167,158",
+# "145,159,146,202,70,220,65,183",

 mask = create_mask(
     (height, width),
     [],
     [
         "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0",
         "336,833,438,1024,346,1093,103,1052,24,814",
     ],
 )

 # create the motion config
@@ -29,7 +45,7 @@ motion_config_1 = MotionConfig()
 motion_config_1.mask = np.zeros((height, width), np.uint8)
 motion_config_1.mask[:] = mask
 # motion_config_1.improve_contrast = 1
-# motion_config_1.frame_height = 150
+motion_config_1.frame_height = 150
 # motion_config_1.frame_alpha = 0.02
 # motion_config_1.threshold = 30
 # motion_config_1.contour_area = 10
@@ -38,10 +54,11 @@ motion_config_2 = MotionConfig()
 motion_config_2.mask = np.zeros((height, width), np.uint8)
 motion_config_2.mask[:] = mask
 # motion_config_2.improve_contrast = 1
-# motion_config_2.frame_height = 150
+motion_config_2.frame_height = 150
 # motion_config_2.frame_alpha = 0.01
-# motion_config_2.threshold = 20
+motion_config_2.threshold = 20
 # motion_config.contour_area = 10

 save_images = True

 improved_motion_detector_1 = ImprovedMotionDetector(
@@ -52,8 +69,6 @@ improved_motion_detector_1 = ImprovedMotionDetector(
     threshold=mp.Value("i", motion_config_1.threshold),
     contour_area=mp.Value("i", motion_config_1.contour_area),
     name="default",
-    clipLimit=2.0,
-    tileGridSize=(8, 8),
 )
 improved_motion_detector_1.save_images = save_images
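The hunks above tune `MotionConfig.frame_height` and `threshold` for the debug clips. As a point of reference, below is a minimal frame-differencing sketch of the kind of pipeline these knobs feed; it is an assumed simplification, not Frigate's actual `ImprovedMotionDetector`, which additionally handles masks, contrast improvement, and recalibration.

```python
# Minimal motion-detection sketch (assumed simplification of Frigate's
# ImprovedMotionDetector). avg_frame must be a float32 array matching the
# resized grayscale frame, maintained across calls.
import cv2
import numpy as np

def detect_motion(frame, avg_frame, frame_height=150, threshold=30, contour_area=10):
    # Downscale to a fixed height so the pixel thresholds behave consistently.
    scale = frame_height / frame.shape[0]
    resized = cv2.resize(frame, (int(frame.shape[1] * scale), frame_height))
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)

    # Difference against a running average, then binarize: `threshold` is the
    # per-pixel cutoff passed to cv2.threshold (1-255); higher = less sensitive.
    cv2.accumulateWeighted(gray, avg_frame, 0.02)
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
    _, thresh = cv2.threshold(delta, threshold, 255, cv2.THRESH_BINARY)

    # Keep only contours large enough to matter.
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return [c for c in contours if cv2.contourArea(c) >= contour_area]
```

Under this reading, raising the default `threshold` from 20 to 30 (see the config change later in this commit) means a pixel must differ more from the running average before it counts as motion.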
docker/build_nginx.sh

@@ -15,6 +15,10 @@ apt-get -yqq build-dep nginx

 apt-get -yqq install --no-install-recommends ca-certificates wget
 update-ca-certificates -f
+apt install -y ccache
+
+export PATH="/usr/lib/ccache:$PATH"
+
 mkdir /tmp/nginx
 wget -nv https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz
 tar -zxf nginx-${NGINX_VERSION}.tar.gz -C /tmp/nginx --strip-components=1

@@ -62,5 +66,5 @@ cd /tmp/nginx
     --add-module=../nginx-rtmp-module \
     --with-cc-opt="-O3 -Wno-error=implicit-fallthrough"

-make -j$(nproc) && make install
+make CC="ccache gcc" -j$(nproc) && make install
 rm -rf /usr/local/nginx/html /usr/local/nginx/conf/*.default
docs/docs/configuration/audio_detectors.md (new file, 63 lines)
@@ -0,0 +1,63 @@
---
id: audio_detectors
title: Audio Detectors
---

Frigate provides a built-in audio detector that runs on the CPU. Compared to object detection in images, audio detection is a relatively lightweight operation, so running it on the CPU is the only option.

## Configuration

Audio events work by detecting a type of audio and creating an event; the event ends once that type of audio has not been heard for the configured amount of time. Audio events save a snapshot at the beginning of the event as well as recordings throughout the event. The recordings are retained according to the configured recording retention.

### Enabling Audio Events

Audio events can be enabled for all cameras or only for specific cameras.

```yaml
audio: # <- enable audio events for all cameras
  enabled: True

cameras:
  front_camera:
    ffmpeg:
      ...
    audio:
      enabled: True # <- enable audio events for the front_camera
```

If you are using multiple streams, you must set the `audio` role on the stream that will be used for audio detection. This can be any stream, but the stream must include audio.

:::note

The ffmpeg process for capturing audio is a separate connection to the camera from the other roles assigned to the camera; for this reason it is recommended to use the go2rtc restream for this purpose. See [the restream docs](/configuration/restream.md) for more information.

:::

```yaml
cameras:
  front_camera:
    ffmpeg:
      inputs:
        - path: rtsp://.../main_stream
          roles:
            - record
        - path: rtsp://.../sub_stream # <- this stream must have audio enabled
          roles:
            - audio
            - detect
```

### Configuring Audio Events

The included audio model can detect over 500 different types of audio, many of which are not practical. By default `bark`, `speech`, `yell`, and `scream` are enabled, but the list can be customized.

```yaml
audio:
  enabled: True
  listen:
    - bark
    - scream
    - speech
    - yell
```
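The event lifecycle described above (detect, open an event, end it after `max_not_heard` seconds of silence) can be illustrated with a short sketch; the names here are illustrative, and the real implementation is `AudioEventMaintainer` in `frigate/events/audio.py` later in this commit.

```python
# Illustrative sketch of the audio-event lifecycle described above.
import time

MAX_NOT_HEARD = 30  # seconds, mirrors the `max_not_heard` config option
active_events = {}  # label -> timestamp the label was last heard

def on_audio_detected(label: str) -> None:
    if label not in active_events:
        print(f"creating audio event for {label}")
    active_events[label] = time.time()

def expire_events() -> None:
    now = time.time()
    for label, last_heard in list(active_events.items()):
        if now - last_heard > MAX_NOT_HEARD:
            print(f"ending audio event for {label}")
            del active_events[label]
```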
@@ -138,6 +138,20 @@ model:
   labelmap:
     2: vehicle

+# Optional: Audio Events Configuration
+# NOTE: Can be overridden at the camera level
+audio:
+  # Optional: Enable audio events (default: shown below)
+  enabled: False
+  # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
+  max_not_heard: 30
+  # Optional: Types of audio to listen for (default: shown below)
+  listen:
+    - bark
+    - scream
+    - speech
+    - yell
+
 # Optional: logger verbosity settings
 logger:
   # Optional: Default log verbosity (default: shown below)

@@ -189,6 +203,11 @@ ffmpeg:
     record: preset-record-generic
     # Optional: output args for rtmp streams (default: shown below)
     rtmp: preset-rtmp-generic
+  # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below)
+  # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once
+  # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
+  # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
+  retry_interval: 10

 # Optional: Detect configuration
 # NOTE: Can be overridden at the camera level

@@ -275,7 +294,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 20
+  threshold: 30
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.

@@ -448,10 +467,11 @@ cameras:
       # Required: the path to the stream
       # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
       - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-        # Required: list of roles for this stream. valid values are: detect,record,rtmp
-        # NOTICE: In addition to assigning the record and rtmp roles,
+        # Required: list of roles for this stream. valid values are: audio,detect,record,rtmp
+        # NOTICE: In addition to assigning the audio, record, and rtmp roles,
         # they must also be enabled in the camera config.
         roles:
+          - audio
           - detect
           - record
           - rtmp
@@ -1,6 +1,6 @@
 ---
-id: detectors
-title: Detectors
+id: object_detectors
+title: Object Detectors
 ---

 Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.

@@ -275,6 +275,6 @@ detectors:
   api_timeout: 0.1 # seconds
 ```

 Replace `<your_codeproject_ai_server_ip>` and `<port>` with the IP address and port of your CodeProject.AI server.

 To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly.
@@ -67,6 +67,7 @@ cameras:
         roles:
           - record
           - detect
+          - audio # <- only necessary if audio detection is enabled
   http_cam:
     ffmpeg:
       output_args:

@@ -77,6 +78,7 @@ cameras:
         roles:
           - record
           - detect
+          - audio # <- only necessary if audio detection is enabled
 ```

 ### With Sub Stream

@@ -112,6 +114,7 @@ cameras:
       - path: rtsp://127.0.0.1:8554/rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream
         input_args: preset-rtsp-restream
         roles:
+          - audio # <- only necessary if audio detection is enabled
           - detect
   http_cam:
     ffmpeg:

@@ -125,6 +128,7 @@ cameras:
       - path: rtsp://127.0.0.1:8554/http_cam_sub # <--- the name here must match the name of the camera_sub in restream
         input_args: preset-rtsp-restream
         roles:
+          - audio # <- only necessary if audio detection is enabled
           - detect
 ```
@@ -50,7 +50,7 @@ The OpenVINO detector type is able to run on:
 - 6th Gen Intel Platforms and newer that have an iGPU
 - x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2)

-More information is available [in the detector docs](/configuration/detectors#openvino-detector)
+More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)

 Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known examples are below:

@@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known

 ### TensorRT

-The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/detectors#nvidia-tensorrt-detector).
+The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).

 Inference speeds will vary greatly depending on the GPU and the model used.
 `tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
@@ -71,7 +71,7 @@ cameras:
       ...
 ```

-More details on available detectors can be found [here](../configuration/detectors.md).
+More details on available detectors can be found [here](../configuration/object_detectors.md).

 Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/index.md#full-configuration-reference).
@@ -109,11 +109,19 @@ Same data available at `/api/stats` published at a configurable interval.

 ### `frigate/<camera_name>/detect/set`

-Topic to turn detection for a camera on and off. Expected values are `ON` and `OFF`.
+Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`.

 ### `frigate/<camera_name>/detect/state`

-Topic with current state of detection for a camera. Published values are `ON` and `OFF`.
+Topic with current state of object detection for a camera. Published values are `ON` and `OFF`.

+### `frigate/<camera_name>/audio/set`
+
+Topic to turn audio detection for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/audio/state`
+
+Topic with current state of audio detection for a camera. Published values are `ON` and `OFF`.
+
 ### `frigate/<camera_name>/recordings/set`
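For reference, the new topics can be exercised with a short paho-mqtt sketch (assuming paho-mqtt 1.x is installed; the broker address and camera name below are placeholders):

```python
# Toggling audio detection over MQTT with paho-mqtt (assumed installed).
import paho.mqtt.client as mqtt

def on_message(client, userdata, message):
    # Frigate retains .../audio/state, so this fires right after subscribing.
    print(f"{message.topic}: {message.payload.decode()}")

client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt.local", 1883)  # placeholder broker
client.subscribe("frigate/front_camera/audio/state")
client.publish("frigate/front_camera/audio/set", "OFF")
client.loop_forever()
```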
@@ -16,7 +16,8 @@ module.exports = {
     ],
     Configuration: [
       "configuration/index",
-      "configuration/detectors",
+      "configuration/object_detectors",
+      "configuration/audio_detectors",
       "configuration/cameras",
       "configuration/masks",
       "configuration/record",
frigate/app.py (103 changed lines)
@@ -6,12 +6,12 @@ import shutil
 import signal
 import sys
 import traceback
-from multiprocessing.queues import Queue
 from multiprocessing.synchronize import Event as MpEvent
 from types import FrameType
 from typing import Optional

 import psutil
+from faster_fifo import Queue
 from peewee_migrate import Router
 from playhouse.sqlite_ext import SqliteExtDatabase
 from playhouse.sqliteq import SqliteQueueDatabase

@@ -29,6 +29,7 @@ from frigate.const import (
     MODEL_CACHE_DIR,
     RECORD_DIR,
 )
+from frigate.events.audio import listen_to_audio
 from frigate.events.cleanup import EventCleanup
 from frigate.events.external import ExternalEventProcessor
 from frigate.events.maintainer import EventProcessor

@@ -44,7 +45,7 @@ from frigate.record.record import manage_recordings
 from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
-from frigate.types import CameraMetricsTypes, RecordMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog

@@ -62,7 +63,7 @@ class FrigateApp:
         self.log_queue: Queue = mp.Queue()
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
-        self.record_metrics: dict[str, RecordMetricsTypes] = {}
+        self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
         self.processes: dict[str, int] = {}

     def set_environment_vars(self) -> None:

@@ -104,37 +105,74 @@ class FrigateApp:
         user_config = FrigateConfig.parse_file(config_file)
         self.config = user_config.runtime_config(self.plus_api)

-        for camera_name in self.config.cameras.keys():
+        for camera_name, camera_config in self.config.cameras.items():
             # create camera_metrics
             self.camera_metrics[camera_name] = {
-                "camera_fps": mp.Value("d", 0.0),
-                "skipped_fps": mp.Value("d", 0.0),
-                "process_fps": mp.Value("d", 0.0),
-                "detection_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].detect.enabled
+                "camera_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "skipped_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "process_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "detection_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].detect.enabled,
                 ),
-                "motion_enabled": mp.Value("i", True),
-                "improve_contrast_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.improve_contrast
+                "motion_enabled": mp.Value("i", True),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "improve_contrast_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].motion.improve_contrast,
                 ),
-                "motion_threshold": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.threshold
+                "motion_threshold": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].motion.threshold,
                 ),
-                "motion_contour_area": mp.Value(
-                    "i", self.config.cameras[camera_name].motion.contour_area
+                "motion_contour_area": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].motion.contour_area,
                 ),
-                "detection_fps": mp.Value("d", 0.0),
-                "detection_frame": mp.Value("d", 0.0),
-                "read_start": mp.Value("d", 0.0),
-                "ffmpeg_pid": mp.Value("i", 0),
+                "detection_fps": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "detection_frame": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "read_start": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "ffmpeg_pid": mp.Value("i", 0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
                 "frame_queue": mp.Queue(maxsize=2),
                 "capture_process": None,
                 "process": None,
             }
-            self.record_metrics[camera_name] = {
-                "record_enabled": mp.Value(
-                    "i", self.config.cameras[camera_name].record.enabled
-                )
+            self.feature_metrics[camera_name] = {
+                "audio_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].audio.enabled,
+                ),
+                "record_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].record.enabled,
+                ),
             }

     def set_log_levels(self) -> None:

@@ -222,7 +260,7 @@ class FrigateApp:
         recording_process = mp.Process(
             target=manage_recordings,
             name="recording_manager",
-            args=(self.config, self.recordings_info_queue, self.record_metrics),
+            args=(self.config, self.recordings_info_queue, self.feature_metrics),
         )
         recording_process.daemon = True
         self.recording_process = recording_process

@@ -281,7 +319,7 @@ class FrigateApp:
             self.config,
             self.onvif_controller,
             self.camera_metrics,
-            self.record_metrics,
+            self.feature_metrics,
             comms,
         )

@@ -390,6 +428,18 @@ class FrigateApp:
             capture_process.start()
             logger.info(f"Capture process started for {name}: {capture_process.pid}")

+    def start_audio_processors(self) -> None:
+        if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0:
+            audio_process = mp.Process(
+                target=listen_to_audio,
+                name="audio_capture",
+                args=(self.config, self.feature_metrics),
+            )
+            audio_process.daemon = True
+            audio_process.start()
+            self.processes["audioDetector"] = audio_process.pid or 0
+            logger.info(f"Audio process started: {audio_process.pid}")
+
     def start_timeline_processor(self) -> None:
         self.timeline_processor = TimelineProcessor(
             self.config, self.timeline_queue, self.stop_event

@@ -486,6 +536,7 @@ class FrigateApp:
         self.start_detected_frames_processor()
         self.start_camera_processors()
         self.start_camera_capture_processes()
+        self.start_audio_processors()
         self.start_storage_maintainer()
         self.init_stats()
         self.init_external_event_processor()
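The per-camera flags in `feature_metrics` use `mp.Value` because they are flipped by the dispatcher in the main process and read inside the audio and recording processes; a plain Python bool would not propagate across process boundaries. A minimal sketch of that pattern, with illustrative names:

```python
# Sketch of the shared-flag pattern used by feature_metrics.
import multiprocessing as mp
import time

def worker(flag) -> None:
    for _ in range(3):
        print("audio enabled:", bool(flag.value))
        time.sleep(0.1)

if __name__ == "__main__":
    flag = mp.Value("i", 1)  # "i" = C int, matching the feature_metrics entries
    p = mp.Process(target=worker, args=(flag,))
    p.start()
    flag.value = 0  # the change is visible inside the child process
    p.join()
```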
@@ -6,7 +6,7 @@ from typing import Any, Callable

 from frigate.config import FrigateConfig
 from frigate.ptz import OnvifCommandEnum, OnvifController
-from frigate.types import CameraMetricsTypes, RecordMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
 from frigate.util import restart_frigate

 logger = logging.getLogger(__name__)

@@ -39,19 +39,20 @@ class Dispatcher:
         config: FrigateConfig,
         onvif: OnvifController,
         camera_metrics: dict[str, CameraMetricsTypes],
-        record_metrics: dict[str, RecordMetricsTypes],
+        feature_metrics: dict[str, FeatureMetricsTypes],
         communicators: list[Communicator],
     ) -> None:
         self.config = config
         self.onvif = onvif
         self.camera_metrics = camera_metrics
-        self.record_metrics = record_metrics
+        self.feature_metrics = feature_metrics
         self.comms = communicators

         for comm in self.comms:
             comm.subscribe(self._receive)

         self._camera_settings_handlers: dict[str, Callable] = {
+            "audio": self._on_audio_command,
             "detect": self._on_detect_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
             "motion": self._on_motion_command,

@@ -186,6 +187,29 @@ class Dispatcher:
             motion_settings.threshold = payload  # type: ignore[union-attr]
         self.publish(f"{camera_name}/motion_threshold/state", payload, retain=True)

+    def _on_audio_command(self, camera_name: str, payload: str) -> None:
+        """Callback for audio topic."""
+        audio_settings = self.config.cameras[camera_name].audio
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].audio.enabled_in_config:
+                logger.error(
+                    "Audio detection must be enabled in the config to be turned on via MQTT."
+                )
+                return
+
+            if not audio_settings.enabled:
+                logger.info(f"Turning on audio detection for {camera_name}")
+                audio_settings.enabled = True
+                self.feature_metrics[camera_name]["audio_enabled"].value = True
+        elif payload == "OFF":
+            if self.feature_metrics[camera_name]["audio_enabled"].value:
+                logger.info(f"Turning off audio detection for {camera_name}")
+                audio_settings.enabled = False
+                self.feature_metrics[camera_name]["audio_enabled"].value = False
+
+        self.publish(f"{camera_name}/audio/state", payload, retain=True)
+
     def _on_recordings_command(self, camera_name: str, payload: str) -> None:
         """Callback for recordings topic."""
         record_settings = self.config.cameras[camera_name].record

@@ -200,12 +224,12 @@ class Dispatcher:
             if not record_settings.enabled:
                 logger.info(f"Turning on recordings for {camera_name}")
                 record_settings.enabled = True
-                self.record_metrics[camera_name]["record_enabled"].value = True
+                self.feature_metrics[camera_name]["record_enabled"].value = True
         elif payload == "OFF":
-            if self.record_metrics[camera_name]["record_enabled"].value:
+            if self.feature_metrics[camera_name]["record_enabled"].value:
                 logger.info(f"Turning off recordings for {camera_name}")
                 record_settings.enabled = False
-                self.record_metrics[camera_name]["record_enabled"].value = False
+                self.feature_metrics[camera_name]["record_enabled"].value = False

         self.publish(f"{camera_name}/recordings/state", payload, retain=True)
@@ -41,7 +41,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
         for camera_name, camera in self.config.cameras.items():
             self.publish(
                 f"{camera_name}/recordings/state",
-                "ON" if camera.record.enabled else "OFF",
+                "ON" if camera.record.enabled_in_config else "OFF",
                 retain=True,
             )
             self.publish(

@@ -49,6 +49,11 @@ class MqttClient(Communicator):  # type: ignore[misc]
                 "ON" if camera.snapshots.enabled else "OFF",
                 retain=True,
             )
+            self.publish(
+                f"{camera_name}/audio/state",
+                "ON" if camera.audio.enabled_in_config else "OFF",
+                retain=True,
+            )
             self.publish(
                 f"{camera_name}/detect/state",
                 "ON" if camera.detect.enabled else "OFF",

@@ -144,6 +149,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "recordings",
             "snapshots",
             "detect",
+            "audio",
             "motion",
             "improve_contrast",
             "motion_threshold",
@@ -40,6 +40,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
 FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}

 DEFAULT_TRACKED_OBJECTS = ["person"]
+DEFAULT_LISTEN_AUDIO = ["bark", "speech", "yell", "scream"]
 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}

@@ -197,7 +198,7 @@ class RecordConfig(FrigateBaseModel):

 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=20,
+        default=30,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,

@@ -397,6 +398,19 @@ class ObjectConfig(FrigateBaseModel):
     mask: Union[str, List[str]] = Field(default="", title="Object mask.")


+class AudioConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable audio events.")
+    max_not_heard: int = Field(
+        default=30, title="Seconds of not hearing the type of audio to end the event."
+    )
+    listen: List[str] = Field(
+        default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
+    )
+    enabled_in_config: Optional[bool] = Field(
+        title="Keep track of original state of audio detection."
+    )
+
+
 class BirdseyeModeEnum(str, Enum):
     objects = "objects"
     motion = "motion"

@@ -473,9 +487,14 @@ class FfmpegConfig(FrigateBaseModel):
         default_factory=FfmpegOutputArgsConfig,
         title="FFmpeg output arguments per role.",
     )
+    retry_interval: float = Field(
+        default=10.0,
+        title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
+    )


 class CameraRoleEnum(str, Enum):
+    audio = "audio"
     record = "record"
     rtmp = "rtmp"
     detect = "detect"

@@ -637,6 +656,9 @@ class CameraConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
     )
+    audio: AudioConfig = Field(
+        default_factory=AudioConfig, title="Audio events configuration."
+    )
     motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
     detect: DetectConfig = Field(
         default_factory=DetectConfig, title="Object detection configuration."

@@ -667,12 +689,16 @@ class CameraConfig(FrigateBaseModel):
         # add roles to the input if there is only one
         if len(config["ffmpeg"]["inputs"]) == 1:
             has_rtmp = "rtmp" in config["ffmpeg"]["inputs"][0].get("roles", [])
+            has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])

             config["ffmpeg"]["inputs"][0]["roles"] = [
                 "record",
                 "detect",
             ]

+            if has_audio:
+                config["ffmpeg"]["inputs"][0]["roles"].append("audio")
+
             if has_rtmp:
                 config["ffmpeg"]["inputs"][0]["roles"].append("rtmp")

@@ -805,6 +831,11 @@ def verify_config_roles(camera_config: CameraConfig) -> None:
             f"Camera {camera_config.name} has rtmp enabled, but rtmp is not assigned to an input."
         )

+    if camera_config.audio.enabled and "audio" not in assigned_roles:
+        raise ValueError(
+            f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
+        )
+

 def verify_valid_live_stream_name(
     frigate_config: FrigateConfig, camera_config: CameraConfig

@@ -917,6 +948,9 @@ class FrigateConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Global object configuration."
     )
+    audio: AudioConfig = Field(
+        default_factory=AudioConfig, title="Global Audio events configuration."
+    )
     motion: Optional[MotionConfig] = Field(
         title="Global motion detection configuration."
     )

@@ -941,6 +975,7 @@ class FrigateConfig(FrigateBaseModel):
         # Global config to propagate down to camera level
         global_config = config.dict(
             include={
+                "audio": ...,
                 "birdseye": ...,
                 "record": ...,
                 "snapshots": ...,

@@ -986,8 +1021,9 @@ class FrigateConfig(FrigateBaseModel):
             camera_config.onvif.password = camera_config.onvif.password.format(
                 **FRIGATE_ENV_VARS
             )
-            # set config recording value
+            # set config pre-value
             camera_config.record.enabled_in_config = camera_config.record.enabled
+            camera_config.audio.enabled_in_config = camera_config.audio.enabled

             # Add default filters
             object_keys = camera_config.objects.track
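For orientation, the defaults of the new `AudioConfig` model can be inspected directly; a sketch, assuming it is run inside the Frigate source tree so `frigate.config` is importable:

```python
# Sketch: defaults of the new AudioConfig pydantic model.
from frigate.config import AudioConfig

cfg = AudioConfig()
print(cfg.enabled)        # False -- audio events are opt-in
print(cfg.max_not_heard)  # 30 seconds of silence ends an event
print(cfg.listen)         # ['bark', 'speech', 'yell', 'scream']

# Overrides validate like any pydantic model:
custom = AudioConfig(enabled=True, listen=["bark", "speech"])
```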
@@ -8,10 +8,28 @@ EXPORT_DIR = f"{BASE_DIR}/exports"
 BIRDSEYE_PIPE = "/tmp/cache/birdseye"
 CACHE_DIR = "/tmp/cache"
 YAML_EXT = (".yaml", ".yml")
+FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
 BTBN_PATH = "/usr/lib/btbn-ffmpeg"

+# Attributes
+
+ATTRIBUTE_LABEL_MAP = {
+    "person": ["face", "amazon"],
+    "car": ["ups", "fedex", "amazon", "license_plate"],
+}
+ALL_ATTRIBUTE_LABELS = [
+    item for sublist in ATTRIBUTE_LABEL_MAP.values() for item in sublist
+]
+
+# Audio Consts
+
+AUDIO_DURATION = 0.975
+AUDIO_FORMAT = "s16le"
+AUDIO_MAX_BIT_RANGE = 32768.0
+AUDIO_SAMPLE_RATE = 16000
+
 # Regex Consts

 REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"

@@ -28,5 +46,4 @@ DRIVER_INTEL_iHD = "iHD"
 # Record Values

 MAX_SEGMENT_DURATION = 600
-SECONDS_IN_DAY = 60 * 60 * 24
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times
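The audio constants fit together as follows: the model (the YAMNet tflite file pulled in the Dockerfile above) expects 0.975 s of 16 kHz mono audio, and `s16le` means 2 bytes per sample, so each read from the ffmpeg pipe is 31,200 bytes decoded into 15,600 int16 samples. A small sketch of that arithmetic:

```python
# Chunk-size arithmetic behind the audio constants.
import numpy as np

AUDIO_DURATION = 0.975
AUDIO_SAMPLE_RATE = 16000
AUDIO_MAX_BIT_RANGE = 32768.0

samples_per_chunk = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE))  # 15600
bytes_per_chunk = samples_per_chunk * 2                             # 31200 (s16le)

raw = bytes(bytes_per_chunk)  # stand-in for one read from the ffmpeg pipe
audio = np.frombuffer(raw, dtype=np.int16)
waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)  # scale into [-1, 1)
assert waveform.shape == (samples_per_chunk,)
```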
frigate/events/audio.py (new file, 255 lines)
@@ -0,0 +1,255 @@
"""Handle creating audio events."""

import datetime
import logging
import multiprocessing as mp
import os
import signal
import threading
from types import FrameType
from typing import Optional

import numpy as np
import requests
from setproctitle import setproctitle

from frigate.config import CameraConfig, FrigateConfig
from frigate.const import (
    AUDIO_DURATION,
    AUDIO_FORMAT,
    AUDIO_MAX_BIT_RANGE,
    AUDIO_SAMPLE_RATE,
    CACHE_DIR,
    FRIGATE_LOCALHOST,
)
from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe
from frigate.object_detection import load_labels
from frigate.types import FeatureMetricsTypes
from frigate.util import get_ffmpeg_arg_list, listen
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg

try:
    from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
    from tensorflow.lite.python.interpreter import Interpreter

logger = logging.getLogger(__name__)


def get_ffmpeg_command(input_args: list[str], input_path: str, pipe: str) -> list[str]:
    return get_ffmpeg_arg_list(
        f"ffmpeg {{}} -i {{}} -f {AUDIO_FORMAT} -ar {AUDIO_SAMPLE_RATE} -ac 1 -y {{}}".format(
            " ".join(input_args),
            input_path,
            pipe,
        )
    )


def listen_to_audio(
    config: FrigateConfig,
    process_info: dict[str, FeatureMetricsTypes],
) -> None:
    stop_event = mp.Event()
    audio_threads: list[threading.Thread] = []

    def exit_process() -> None:
        for thread in audio_threads:
            thread.join()

        logger.info("Exiting audio detector...")

    def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
        stop_event.set()
        exit_process()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = "process:audio_manager"
    setproctitle("frigate.audio_manager")
    listen()

    for camera in config.cameras.values():
        if camera.enabled and camera.audio.enabled_in_config:
            audio = AudioEventMaintainer(camera, process_info, stop_event)
            audio_threads.append(audio)
            audio.start()


class AudioTfl:
    def __init__(self, stop_event: mp.Event):
        self.stop_event = stop_event
        self.labels = load_labels("/audio-labelmap.txt")
        self.interpreter = Interpreter(
            model_path="/cpu_audio_model.tflite",
            num_threads=2,
        )

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def _detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()
        detections = np.zeros((20, 6), np.float32)

        res = self.interpreter.get_tensor(self.tensor_output_details[0]["index"])[0]
        non_zero_indices = res > 0
        class_ids = np.argpartition(-res, 20)[:20]
        class_ids = class_ids[np.argsort(-res[class_ids])]
        class_ids = class_ids[non_zero_indices[class_ids]]
        scores = res[class_ids]
        boxes = np.full((scores.shape[0], 4), -1, np.float32)
        count = len(scores)

        for i in range(count):
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections

    def detect(self, tensor_input, threshold=0.8):
        detections = []

        if self.stop_event.is_set():
            return detections

        raw_detections = self._detect_raw(tensor_input)

        for d in raw_detections:
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        return detections


class AudioEventMaintainer(threading.Thread):
    def __init__(
        self,
        camera: CameraConfig,
        feature_metrics: dict[str, FeatureMetricsTypes],
        stop_event: mp.Event,
    ) -> None:
        threading.Thread.__init__(self)
        self.name = f"{camera.name}_audio_event_processor"
        self.config = camera
        self.feature_metrics = feature_metrics
        self.detections: dict[dict[str, any]] = feature_metrics
        self.stop_event = stop_event
        self.detector = AudioTfl(stop_event)
        self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
        self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2))
        self.pipe = f"{CACHE_DIR}/{self.config.name}-audio"
        self.ffmpeg_cmd = get_ffmpeg_command(
            get_ffmpeg_arg_list(self.config.ffmpeg.global_args)
            + parse_preset_input("preset-rtsp-audio-only", 1),
            [i.path for i in self.config.ffmpeg.inputs if "audio" in i.roles][0],
            self.pipe,
        )
        self.pipe_file = None
        self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.audio")
        self.audio_listener = None

    def detect_audio(self, audio) -> None:
        if not self.feature_metrics[self.config.name]["audio_enabled"].value:
            return

        waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
        model_detections = self.detector.detect(waveform)

        for label, score, _ in model_detections:
            if label not in self.config.audio.listen:
                continue

            self.handle_detection(label, score)

        self.expire_detections()

    def handle_detection(self, label: str, score: float) -> None:
        if self.detections.get(label):
            self.detections[label][
                "last_detection"
            ] = datetime.datetime.now().timestamp()
        else:
            resp = requests.post(
                f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create",
                json={"duration": None, "source_type": "audio"},
            )

            if resp.status_code == 200:
                event_id = resp.json()[0]["event_id"]
                self.detections[label] = {
                    "id": event_id,
                    "label": label,
                    "last_detection": datetime.datetime.now().timestamp(),
                }

    def expire_detections(self) -> None:
        now = datetime.datetime.now().timestamp()

        for detection in self.detections.values():
            if not detection:
                continue

            if (
                now - detection.get("last_detection", now)
                > self.config.audio.max_not_heard
            ):
                resp = requests.put(
                    f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
                    json={
                        "end_time": detection["last_detection"]
                        + self.config.record.events.post_capture
                    },
                )
                if resp.status_code == 200:
                    self.detections[detection["label"]] = None
                else:
                    logger.warn(
                        f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
                    )

    def restart_audio_pipe(self) -> None:
        try:
            os.mkfifo(self.pipe)
        except FileExistsError:
            pass

        self.audio_listener = start_or_restart_ffmpeg(
            self.ffmpeg_cmd, logger, self.logpipe, None, self.audio_listener
        )

    def read_audio(self) -> None:
        if self.pipe_file is None:
            self.pipe_file = open(self.pipe, "rb")

        try:
            audio = np.frombuffer(self.pipe_file.read(self.chunk_size), dtype=np.int16)
            self.detect_audio(audio)
        except BrokenPipeError:
            self.logpipe.dump()
            self.restart_audio_pipe()

    def run(self) -> None:
        self.restart_audio_pipe()

        while not self.stop_event.is_set():
            self.read_audio()

        self.pipe_file.close()
        stop_ffmpeg(self.audio_listener, logger)
        self.logpipe.close()
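The `argpartition` dance in `AudioTfl._detect_raw` is worth unpacking: it takes the 20 highest scores without a full sort, orders them descending, and drops non-positive entries before thresholding. A toy demonstration on a random score vector:

```python
# Demonstration of the top-k selection used in AudioTfl._detect_raw.
import numpy as np

res = np.random.rand(521).astype(np.float32)  # one score per audio label

class_ids = np.argpartition(-res, 20)[:20]           # top 20, unordered
class_ids = class_ids[np.argsort(-res[class_ids])]   # ordered high -> low
class_ids = class_ids[res[class_ids] > 0]            # keep strictly positive scores
print(list(zip(class_ids[:5], res[class_ids][:5])))
```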
@@ -6,10 +6,10 @@ import logging
 import os
 import random
 import string
-from multiprocessing.queues import Queue
 from typing import Optional

 import cv2
+from faster_fifo import Queue

 from frigate.config import CameraConfig, FrigateConfig
 from frigate.const import CLIPS_DIR

@@ -29,6 +29,7 @@ class ExternalEventProcessor:
         self,
         camera: str,
         label: str,
+        source_type: str,
         sub_label: Optional[str],
         duration: Optional[int],
         include_recording: bool,

@@ -61,17 +62,17 @@ class ExternalEventProcessor:
                     "thumbnail": thumbnail,
                     "has_clip": camera_config.record.enabled and include_recording,
                     "has_snapshot": True,
+                    "type": source_type,
                 },
             )
         )

         return event_id

-    def finish_manual_event(self, event_id: str) -> None:
+    def finish_manual_event(self, event_id: str, end_time: float) -> None:
         """Finish external event with indeterminate duration."""
-        now = datetime.datetime.now().timestamp()
         self.queue.put(
-            (EventTypeEnum.api, "end", None, {"id": event_id, "end_time": now})
+            (EventTypeEnum.api, "end", None, {"id": event_id, "end_time": end_time})
         )

     def _write_images(
@@ -3,10 +3,11 @@ import logging
 import queue
 import threading
 from enum import Enum
-from multiprocessing.queues import Queue
 from multiprocessing.synchronize import Event as MpEvent
 from typing import Dict

+from faster_fifo import Queue
+
 from frigate.config import EventsConfig, FrigateConfig
 from frigate.models import Event
 from frigate.types import CameraMetricsTypes

@@ -17,7 +18,6 @@ logger = logging.getLogger(__name__)

 class EventTypeEnum(str, Enum):
     api = "api"
-    # audio = "audio"
     tracked_object = "tracked_object"


@@ -72,19 +72,21 @@ class EventProcessor(threading.Thread):
             except queue.Empty:
                 continue

-            logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
-
-            self.timeline_queue.put(
-                (
-                    camera,
-                    source_type,
-                    event_type,
-                    self.events_in_process.get(event_data["id"]),
-                    event_data,
-                )
+            logger.debug(
+                f"Event received: {source_type} {event_type} {camera} {event_data['id']}"
             )

+            if source_type == EventTypeEnum.tracked_object:
+                self.timeline_queue.put(
+                    (
+                        camera,
+                        source_type,
+                        event_type,
+                        self.events_in_process.get(event_data["id"]),
+                        event_data,
+                    )
+                )
+
             if event_type == "start":
                 self.events_in_process[event_data["id"]] = event_data
                 continue

@@ -191,6 +193,7 @@ class EventProcessor(threading.Thread):
                 "score": score,
                 "top_score": event_data["top_score"],
                 "attributes": attributes,
+                "type": "object",
             },
         }

@@ -214,8 +217,8 @@ class EventProcessor(threading.Thread):
         del self.events_in_process[event_data["id"]]
         self.event_processed_queue.put((event_data["id"], camera))

-    def handle_external_detection(self, type: str, event_data: Event):
-        if type == "new":
+    def handle_external_detection(self, event_type: str, event_data: Event) -> None:
+        if event_type == "new":
             event = {
                 Event.id: event_data["id"],
                 Event.label: event_data["label"],

@@ -227,22 +230,16 @@ class EventProcessor(threading.Thread):
                 Event.has_clip: event_data["has_clip"],
                 Event.has_snapshot: event_data["has_snapshot"],
                 Event.zones: [],
-                Event.data: {},
+                Event.data: {"type": event_data["type"]},
             }
-        elif type == "end":
+            Event.insert(event).execute()
+        elif event_type == "end":
             event = {
                 Event.id: event_data["id"],
                 Event.end_time: event_data["end_time"],
             }

-        try:
-            (
-                Event.insert(event)
-                .on_conflict(
-                    conflict_target=[Event.id],
-                    update=event,
-                )
-                .execute()
-            )
-        except Exception:
-            logger.warning(f"Failed to update manual event: {event_data['id']}")
+            try:
+                Event.update(event).where(Event.id == event_data["id"]).execute()
+            except Exception:
+                logger.warning(f"Failed to update manual event: {event_data['id']}")
@ -282,6 +282,13 @@ PRESETS_INPUT = {
"-use_wallclock_as_timestamps",
"1",
],
"preset-rtsp-audio-only": [
"-rtsp_transport",
"tcp",
TIMEOUT_PARAM,
"5000000",
"-vn",
],
"preset-rtsp-restream": _user_agent_args
+ [
"-rtsp_transport",

@ -410,6 +410,24 @@ def set_sub_label(id):
)

@bp.route("/labels")
def get_labels():
camera = request.args.get("camera", type=str, default="")

try:
if camera:
events = Event.select(Event.label).where(Event.camera == camera).distinct()
else:
events = Event.select(Event.label).distinct()
except Exception as e:
return jsonify(
{"success": False, "message": f"Failed to get labels: {e}"}, "404"
)

labels = sorted([e.label for e in events])
return jsonify(labels)

@bp.route("/sub_labels")
def get_sub_labels():
split_joined = request.args.get("split_joined", type=int)
@ -866,6 +884,7 @@ def create_event(camera_name, label):
event_id = current_app.external_processor.create_manual_event(
camera_name,
label,
json.get("source_type", "api"),
json.get("sub_label", None),
json.get("duration", 30),
json.get("include_recording", True),
@ -890,8 +909,11 @@ def create_event(camera_name, label):

@bp.route("/events/<event_id>/end", methods=["PUT"])
def end_event(event_id):
json: dict[str, any] = request.get_json(silent=True) or {}

try:
current_app.external_processor.finish_manual_event(event_id)
end_time = json.get("end_time", datetime.now().timestamp())
current_app.external_processor.finish_manual_event(event_id, end_time)
except Exception:
return jsonify(
{"success": False, "message": f"{event_id} must be set and valid."}, 404
@ -1100,6 +1122,15 @@ def latest_frame(camera_name):
height = int(request.args.get("h", str(frame.shape[0])))
width = int(height * frame.shape[1] / frame.shape[0])

if frame is None:
return "Unable to get valid frame from {}".format(camera_name), 500

if height < 1 or width < 1:
return (
"Invalid height / width requested :: {} / {}".format(height, width),
400,
)

frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

ret, jpg = cv2.imencode(

@ -7,10 +7,10 @@ import signal
import threading
from collections import deque
from logging import handlers
from multiprocessing.queues import Queue
from types import FrameType
from typing import Deque, Optional

from faster_fifo import Queue
from setproctitle import setproctitle

from frigate.util import clean_camera_user_pass

@ -1,6 +1,7 @@
import cv2
import imutils
import numpy as np
from scipy.ndimage import gaussian_filter

from frigate.config import MotionConfig
from frigate.motion import MotionDetector
@ -15,9 +16,10 @@ class ImprovedMotionDetector(MotionDetector):
improve_contrast,
threshold,
contour_area,
clipLimit=2.0,
tileGridSize=(2, 2),
name="improved",
blur_radius=1,
interpolation=cv2.INTER_NEAREST,
contrast_frame_history=50,
):
self.name = name
self.config = config
@ -28,13 +30,12 @@ class ImprovedMotionDetector(MotionDetector):
config.frame_height * frame_shape[1] // frame_shape[0],
)
self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
self.motion_frame_count = 0
self.frame_counter = 0
resized_mask = cv2.resize(
config.mask,
dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
interpolation=cv2.INTER_LINEAR,
interpolation=cv2.INTER_AREA,
)
self.mask = np.where(resized_mask == [0])
self.save_images = False
@ -42,7 +43,11 @@ class ImprovedMotionDetector(MotionDetector):
self.improve_contrast = improve_contrast
self.threshold = threshold
self.contour_area = contour_area
self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
self.blur_radius = blur_radius
self.interpolation = interpolation
self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
self.contrast_values[:, 1:2] = 255
self.contrast_values_index = 0

def detect(self, frame):
motion_boxes = []
@ -53,27 +58,44 @@ class ImprovedMotionDetector(MotionDetector):
resized_frame = cv2.resize(
gray,
dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
interpolation=cv2.INTER_LINEAR,
interpolation=self.interpolation,
)

if self.save_images:
resized_saved = resized_frame.copy()

resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)

if self.save_images:
blurred_saved = resized_frame.copy()

# Improve contrast
if self.improve_contrast.value:
resized_frame = self.clahe.apply(resized_frame)
# TODO tracking moving average of min/max to avoid sudden contrast changes
minval = np.percentile(resized_frame, 4).astype(np.uint8)
maxval = np.percentile(resized_frame, 96).astype(np.uint8)
# skip contrast calcs if the image is a single color
if minval < maxval:
# keep track of the last 50 contrast values
self.contrast_values[self.contrast_values_index] = [minval, maxval]
self.contrast_values_index += 1
if self.contrast_values_index == len(self.contrast_values):
self.contrast_values_index = 0

avg_min, avg_max = np.mean(self.contrast_values, axis=0)

resized_frame = np.clip(resized_frame, avg_min, avg_max)
resized_frame = (
((resized_frame - avg_min) / (avg_max - avg_min)) * 255
).astype(np.uint8)

if self.save_images:
contrasted_saved = resized_frame.copy()

# mask frame
# this has to come after contrast improvement
resized_frame[self.mask] = [255]

resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)

if self.save_images:
blurred_saved = resized_frame.copy()

if self.save_images or self.calibrating:
self.frame_counter += 1
# compare to average
@ -134,8 +156,8 @@ class ImprovedMotionDetector(MotionDetector):
)
frames = [
cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
thresh_dilated,

@ -188,15 +188,14 @@ class TrackedObject:
zone_score = self.zone_presence.get(name, 0)
# check if the object is in the zone
if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
self.zone_presence[name] = zone_score + 1
# if the object passed the filters once, dont apply again
if name in self.current_zones or not zone_filtered(self, zone.filters):
self.zone_presence[name] = zone_score + 1

# an object is only considered present in a zone if it has a zone inertia of 3+
if zone_score >= zone.inertia:
# if the object passed the filters once, dont apply again
if name in self.current_zones or not zone_filtered(
self, zone.filters
):
# an object is only considered present in a zone if it has a zone inertia of 3+
if zone_score >= zone.inertia:
current_zones.append(name)

if name not in self.entered_zones:
self.entered_zones.append(name)
else:

@ -156,7 +156,12 @@ class BroadcastThread(threading.Thread):

class BirdsEyeFrameManager:
def __init__(self, config: FrigateConfig, frame_manager: SharedMemoryFrameManager):
def __init__(
self,
config: FrigateConfig,
frame_manager: SharedMemoryFrameManager,
stop_event: mp.Event,
):
self.config = config
self.mode = config.birdseye.mode
self.frame_manager = frame_manager
@ -165,6 +170,7 @@ class BirdsEyeFrameManager:
self.frame_shape = (height, width)
self.yuv_shape = (height * 3 // 2, width)
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
self.stop_event = stop_event

# initialize the frame as black and with the Frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
@ -270,119 +276,6 @@ class BirdsEyeFrameManager:
def update_frame(self):
"""Update to a new frame for birdseye."""

def calculate_layout(
canvas, cameras_to_add: list[str], coefficient
) -> tuple[any]:
"""Calculate the optimal layout for 2+ cameras."""
camera_layout: list[list[any]] = []
camera_layout.append([])
canvas_gcd = math.gcd(canvas[0], canvas[1])
canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
starting_x = 0
x = starting_x
y = 0
y_i = 0
max_y = 0
for camera in cameras_to_add:
camera_dims = self.cameras[camera]["dimensions"].copy()
camera_gcd = math.gcd(camera_dims[0], camera_dims[1])
camera_aspect_x = camera_dims[0] / camera_gcd
camera_aspect_y = camera_dims[1] / camera_gcd

if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
# account for slightly off 16:9 cameras
camera_aspect_x = 16
camera_aspect_y = 9
elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
# make 4:3 cameras the same relative size as 16:9
camera_aspect_x = 12
camera_aspect_y = 9

if camera_dims[1] > camera_dims[0]:
portrait = True
else:
portrait = False

if (x + camera_aspect_x) <= canvas_aspect_x:
# insert if camera can fit on current row
camera_layout[y_i].append(
(
camera,
(
camera_aspect_x,
camera_aspect_y,
),
)
)

if portrait:
starting_x = camera_aspect_x
else:
max_y = max(
max_y,
camera_aspect_y,
)

x += camera_aspect_x
else:
# move on to the next row and insert
y += max_y
y_i += 1
camera_layout.append([])
x = starting_x

if x + camera_aspect_x > canvas_aspect_x:
return None

camera_layout[y_i].append(
(
camera,
(camera_aspect_x, camera_aspect_y),
)
)
x += camera_aspect_x

if y + max_y > canvas_aspect_y:
return None

row_height = int(canvas_height / coefficient)

final_camera_layout = []
starting_x = 0
y = 0

for row in camera_layout:
final_row = []
x = starting_x
for cameras in row:
camera_dims = self.cameras[cameras[0]]["dimensions"].copy()

if camera_dims[1] > camera_dims[0]:
scaled_height = int(row_height * coefficient)
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)
starting_x = scaled_width
else:
scaled_height = row_height
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)

if (
x + scaled_width > canvas_width
or y + scaled_height > canvas_height
):
return None

final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
x += scaled_width
y += row_height
final_camera_layout.append(final_row)

return final_camera_layout

# determine how many cameras are tracking objects within the last 30 seconds
active_cameras = set(
[
@ -405,10 +298,8 @@ class BirdsEyeFrameManager:
self.clear_frame()
return True

# check if we need to reset the layout because there are new cameras to add
reset_layout = (
True if len(active_cameras.difference(self.active_cameras)) > 0 else False
)
# check if we need to reset the layout because there is a different number of cameras
reset_layout = len(self.active_cameras) - len(active_cameras) != 0

# reset the layout if it needs to be different
if reset_layout:
@ -458,7 +349,10 @@ class BirdsEyeFrameManager:

# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
while calculating:
layout_candidate = calculate_layout(
if self.stop_event.is_set():
return

layout_candidate = self.calculate_layout(
(canvas_width, canvas_height),
active_cameras_to_add,
coefficient,
@ -484,6 +378,145 @@ class BirdsEyeFrameManager:

return True

def calculate_layout(
self, canvas, cameras_to_add: list[str], coefficient
) -> tuple[any]:
"""Calculate the optimal layout for 2+ cameras."""

def map_layout(row_height: int):
"""Map the calculated layout."""
candidate_layout = []
starting_x = 0
x = 0
max_width = 0
y = 0

for row in camera_layout:
final_row = []
max_width = max(max_width, x)
x = starting_x
for cameras in row:
camera_dims = self.cameras[cameras[0]]["dimensions"].copy()

if camera_dims[1] > camera_dims[0]:
scaled_height = int(row_height * 2)
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)
starting_x = scaled_width
else:
scaled_height = row_height
scaled_width = int(
scaled_height * camera_dims[0] / camera_dims[1]
)

# layout is too large
if (
x + scaled_width > canvas_width
or y + scaled_height > canvas_height
):
return 0, 0, None

final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
x += scaled_width

y += row_height
candidate_layout.append(final_row)

return max_width, y, candidate_layout

canvas_width = canvas[0]
canvas_height = canvas[1]
camera_layout: list[list[any]] = []
camera_layout.append([])
canvas_gcd = math.gcd(canvas[0], canvas[1])
canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
starting_x = 0
x = starting_x
y = 0
y_i = 0
max_y = 0
for camera in cameras_to_add:
camera_dims = self.cameras[camera]["dimensions"].copy()
camera_gcd = math.gcd(camera_dims[0], camera_dims[1])
camera_aspect_x = camera_dims[0] / camera_gcd
camera_aspect_y = camera_dims[1] / camera_gcd

if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
# account for slightly off 16:9 cameras
camera_aspect_x = 16
camera_aspect_y = 9
elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
# make 4:3 cameras the same relative size as 16:9
camera_aspect_x = 12
camera_aspect_y = 9

if camera_dims[1] > camera_dims[0]:
portrait = True
else:
portrait = False

if (x + camera_aspect_x) <= canvas_aspect_x:
# insert if camera can fit on current row
camera_layout[y_i].append(
(
camera,
(
camera_aspect_x,
camera_aspect_y,
),
)
)

if portrait:
starting_x = camera_aspect_x
else:
max_y = max(
max_y,
camera_aspect_y,
)

x += camera_aspect_x
else:
# move on to the next row and insert
y += max_y
y_i += 1
camera_layout.append([])
x = starting_x

if x + camera_aspect_x > canvas_aspect_x:
return None

camera_layout[y_i].append(
(
camera,
(camera_aspect_x, camera_aspect_y),
)
)
x += camera_aspect_x

if y + max_y > canvas_aspect_y:
return None

row_height = int(canvas_height / coefficient)
total_width, total_height, standard_candidate_layout = map_layout(row_height)

# layout can't be optimized more
if total_width / canvas_width >= 0.99:
return standard_candidate_layout

scale_up_percent = min(
1 - (total_width / canvas_width), 1 - (total_height / canvas_height)
)
row_height = int(row_height * (1 + round(scale_up_percent, 1)))
_, _, scaled_layout = map_layout(row_height)

if scaled_layout:
return scaled_layout
else:
return standard_candidate_layout

def update(self, camera, object_count, motion_count, frame_time, frame) -> bool:
# don't process if birdseye is disabled for this camera
camera_config = self.config.cameras[camera].birdseye
@ -580,7 +613,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
for t in broadcasters.values():
t.start()

birdseye_manager = BirdsEyeFrameManager(config, frame_manager)
birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)

if config.birdseye.restream:
birdseye_buffer = frame_manager.create(

@ -8,10 +8,10 @@ import threading
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path

from peewee import DatabaseError, DoesNotExist, chunked
from peewee import DatabaseError, chunked

from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import RECORD_DIR, SECONDS_IN_DAY
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
from frigate.record.util import remove_empty_directories

@ -28,7 +28,7 @@ class RecordingCleanup(threading.Thread):
self.stop_event = stop_event

def clean_tmp_clips(self) -> None:
# delete any clips more than 5 minutes old
"""delete any clips in the cache that are more than 5 minutes old."""
for p in Path("/tmp/cache").rglob("clip_*.mp4"):
logger.debug(f"Checking tmp clip {p}.")
if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
@ -40,8 +40,8 @@ class RecordingCleanup(threading.Thread):
p.unlink(missing_ok=True)

def expire_recordings(self) -> None:
logger.debug("Start expire recordings (new).")

"""Delete recordings based on retention config."""
logger.debug("Start expire recordings.")
logger.debug("Start deleted cameras.")
# Handle deleted cameras
expire_days = self.config.record.retain.days
@ -161,59 +161,10 @@ class RecordingCleanup(threading.Thread):
logger.debug(f"End camera: {camera}.")

logger.debug("End all cameras.")
logger.debug("End expire recordings (new).")

def expire_files(self) -> None:
logger.debug("Start expire files (legacy).")

default_expire = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * self.config.record.retain.days
)
delete_before = {}

for name, camera in self.config.cameras.items():
delete_before[name] = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * camera.record.retain.days
)

# find all the recordings older than the oldest recording in the db
try:
oldest_recording = (
Recordings.select().order_by(Recordings.start_time).limit(1).get()
)

p = Path(oldest_recording.path)
oldest_timestamp = p.stat().st_mtime - 1
except DoesNotExist:
oldest_timestamp = datetime.datetime.now().timestamp()
except FileNotFoundError:
logger.warning(f"Unable to find file from recordings database: {p}")
Recordings.delete().where(Recordings.id == oldest_recording.id).execute()
return

logger.debug(f"Oldest recording in the db: {oldest_timestamp}")

files_to_check = []

for root, _, files in os.walk(RECORD_DIR):
for file in files:
file_path = os.path.join(root, file)
if os.path.getmtime(file_path) < oldest_timestamp:
files_to_check.append(file_path)

for f in files_to_check:
p = Path(f)
try:
if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire):
p.unlink(missing_ok=True)
except FileNotFoundError:
logger.warning(f"Attempted to expire missing file: {f}")

logger.debug("End expire files (legacy).")
logger.debug("End expire recordings.")

def sync_recordings(self) -> None:
"""Check the db for stale recordings entries that don't exist in the filesystem."""
logger.debug("Start sync recordings.")

# get all recordings in the db
@ -283,5 +234,4 @@ class RecordingCleanup(threading.Thread):

if counter == 0:
self.expire_recordings()
self.expire_files()
remove_empty_directories(RECORD_DIR)

@ -20,7 +20,7 @@ import psutil
from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.types import RecordMetricsTypes
from frigate.types import FeatureMetricsTypes
from frigate.util import area, get_video_properties

logger = logging.getLogger(__name__)
@ -31,7 +31,7 @@ class RecordingMaintainer(threading.Thread):
self,
config: FrigateConfig,
recordings_info_queue: mp.Queue,
process_info: dict[str, RecordMetricsTypes],
process_info: dict[str, FeatureMetricsTypes],
stop_event: MpEvent,
):
threading.Thread.__init__(self)

@ -14,7 +14,7 @@ from frigate.config import FrigateConfig
from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
from frigate.record.cleanup import RecordingCleanup
from frigate.record.maintainer import RecordingMaintainer
from frigate.types import RecordMetricsTypes
from frigate.types import FeatureMetricsTypes
from frigate.util import listen

logger = logging.getLogger(__name__)
@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
def manage_recordings(
config: FrigateConfig,
recordings_info_queue: mp.Queue,
process_info: dict[str, RecordMetricsTypes],
process_info: dict[str, FeatureMetricsTypes],
) -> None:
stop_event = mp.Event()

@ -262,8 +262,12 @@ def stats_snapshot(
for name, detector in stats_tracking["detectors"].items():
pid = detector.detect_process.pid if detector.detect_process else None
stats["detectors"][name] = {
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2),
"detection_start": detector.detection_start.value,
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2),  # type: ignore[attr-defined]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"detection_start": detector.detection_start.value,  # type: ignore[attr-defined]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"pid": pid,
}
stats["detection_fps"] = round(total_detection_fps, 2)

@ -3,9 +3,10 @@
import logging
import queue
import threading
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent

from faster_fifo import Queue

from frigate.config import FrigateConfig
from frigate.events.maintainer import EventTypeEnum
from frigate.models import Timeline

@ -1,8 +1,9 @@
from multiprocessing.context import Process
from multiprocessing.queues import Queue
from multiprocessing.sharedctypes import Synchronized
from typing import Optional, TypedDict

from faster_fifo import Queue

from frigate.object_detection import ObjectDetectProcess

@ -24,7 +25,8 @@ class CameraMetricsTypes(TypedDict):
skipped_fps: Synchronized

class RecordMetricsTypes(TypedDict):
class FeatureMetricsTypes(TypedDict):
audio_enabled: Synchronized
record_enabled: Synchronized

@ -650,34 +650,42 @@ def restart_frigate():

class EventsPerSecond:
def __init__(self, max_events=1000):
def __init__(self, max_events=1000, last_n_seconds=10):
self._start = None
self._max_events = max_events
self._last_n_seconds = last_n_seconds
self._timestamps = []

def start(self):
self._start = datetime.datetime.now().timestamp()

def update(self):
now = datetime.datetime.now().timestamp()
if self._start is None:
self.start()
self._timestamps.append(datetime.datetime.now().timestamp())
self._start = now
self._timestamps.append(now)
# truncate the list when it goes 100 over the max_size
if len(self._timestamps) > self._max_events + 100:
self._timestamps = self._timestamps[(1 - self._max_events) :]
self.expire_timestamps(now)

def eps(self, last_n_seconds=10):
if self._start is None:
self.start()
# compute the (approximate) events in the last n seconds
def eps(self):
now = datetime.datetime.now().timestamp()
seconds = min(now - self._start, last_n_seconds)
if self._start is None:
self._start = now
# compute the (approximate) events in the last n seconds
self.expire_timestamps(now)
seconds = min(now - self._start, self._last_n_seconds)
# avoid divide by zero
if seconds == 0:
seconds = 1
return (
len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
)
return len(self._timestamps) / seconds

# remove aged out timestamps
def expire_timestamps(self, now):
threshold = now - self._last_n_seconds
while self._timestamps and self._timestamps[0] < threshold:
del self._timestamps[0]

def print_stack(sig, frame):

@ -15,7 +15,7 @@ import numpy as np
from setproctitle import setproctitle

from frigate.config import CameraConfig, DetectConfig
from frigate.const import CACHE_DIR
from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
from frigate.detectors.detector_config import PixelFormatEnum
from frigate.log import LogPipe
from frigate.motion import MotionDetector
@ -172,7 +172,7 @@ def capture_frames(
skipped_eps.start()
while True:
fps.value = frame_rate.eps()
skipped_eps.eps()
skipped_fps.value = skipped_eps.eps()

current_frame.value = datetime.datetime.now().timestamp()
frame_name = f"{camera_name}{current_frame.value}"
@ -215,6 +215,7 @@ class CameraWatchdog(threading.Thread):
config: CameraConfig,
frame_queue,
camera_fps,
skipped_fps,
ffmpeg_pid,
stop_event,
):
@ -227,11 +228,13 @@ class CameraWatchdog(threading.Thread):
self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")
self.ffmpeg_other_processes: list[dict[str, any]] = []
self.camera_fps = camera_fps
self.skipped_fps = skipped_fps
self.ffmpeg_pid = ffmpeg_pid
self.frame_queue = frame_queue
self.frame_shape = self.config.frame_shape_yuv
self.frame_size = self.frame_shape[0] * self.frame_shape[1]
self.stop_event = stop_event
self.sleeptime = self.config.ffmpeg.retry_interval

def run(self):
self.start_ffmpeg_detect()
@ -251,8 +254,8 @@ class CameraWatchdog(threading.Thread):
}
)

time.sleep(10)
while not self.stop_event.wait(10):
time.sleep(self.sleeptime)
while not self.stop_event.wait(self.sleeptime):
now = datetime.datetime.now().timestamp()

if not self.capture_thread.is_alive():
@ -346,6 +349,7 @@ class CameraWatchdog(threading.Thread):
self.frame_shape,
self.frame_queue,
self.camera_fps,
self.skipped_fps,
self.stop_event,
)
self.capture_thread.start()
@ -376,7 +380,14 @@ class CameraWatchdog(threading.Thread):

class CameraCapture(threading.Thread):
def __init__(
self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
self,
camera_name,
ffmpeg_process,
frame_shape,
frame_queue,
fps,
skipped_fps,
stop_event,
):
threading.Thread.__init__(self)
self.name = f"capture:{camera_name}"
@ -385,14 +396,13 @@ class CameraCapture(threading.Thread):
self.frame_queue = frame_queue
self.fps = fps
self.stop_event = stop_event
self.skipped_fps = EventsPerSecond()
self.skipped_fps = skipped_fps
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
self.current_frame = mp.Value("d", 0.0)
self.last_frame = 0

def run(self):
self.skipped_fps.start()
capture_frames(
self.ffmpeg_process,
self.camera_name,
@ -424,6 +434,7 @@ def capture_camera(name, config: CameraConfig, process_info):
config,
frame_queue,
process_info["camera_fps"],
process_info["skipped_fps"],
process_info["ffmpeg_pid"],
stop_event,
)
@ -723,14 +734,6 @@ def process_frames(
stop_event,
exit_on_empty: bool = False,
):
# attribute labels are not tracked and are not assigned regions
attribute_label_map = {
"person": ["face", "amazon"],
"car": ["ups", "fedex", "amazon", "license_plate"],
}
all_attribute_labels = [
item for sublist in attribute_label_map.values() for item in sublist
]
fps = process_info["process_fps"]
detection_fps = process_info["detection_fps"]
current_frame_time = process_info["detection_frame"]
@ -906,7 +909,7 @@ def process_frames(
tracked_detections = [
d
for d in consolidated_detections
if d[0] not in all_attribute_labels
if d[0] not in ALL_ATTRIBUTE_LABELS
]
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(frame_time, tracked_detections)
@ -916,7 +919,7 @@ def process_frames(

# group the attribute detections based on what label they apply to
attribute_detections = {}
for label, attribute_labels in attribute_label_map.items():
for label, attribute_labels in ATTRIBUTE_LABEL_MAP.items():
attribute_detections[label] = [
d for d in consolidated_detections if d[0] in attribute_labels
]

@ -24,7 +24,9 @@ class FrigateWatchdog(threading.Thread):

# check the detection processes
for detector in self.detectors.values():
detection_start = detector.detection_start.value
detection_start = detector.detection_start.value  # type: ignore[attr-defined]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
if detection_start > 0.0 and now - detection_start > 10:
logger.info(
"Detection appears to be stuck. Restarting detection process..."

@ -1,14 +1,15 @@
click == 8.1.*
Flask == 2.3.*
faster-fifo == 1.4.*
imutils == 0.5.*
matplotlib == 3.7.*
mypy == 0.942
mypy == 1.4.1
numpy == 1.23.*
onvif_zeep == 0.2.12
opencv-python-headless == 4.5.5.*
opencv-python-headless == 4.7.0.*
paho-mqtt == 1.6.*
peewee == 3.16.*
peewee_migrate == 1.10.*
peewee_migrate == 1.11.*
psutil == 5.9.*
pydantic == 1.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml

@ -16,6 +16,7 @@ export const handlers = [
front: {
name: 'front',
objects: { track: ['taco', 'cat', 'dog'] },
audio: { enabled: false, enabled_in_config: false },
record: { enabled: true, enabled_in_config: true },
detect: { width: 1280, height: 720 },
snapshots: {},
@ -25,6 +26,7 @@ export const handlers = [
side: {
name: 'side',
objects: { track: ['taco', 'cat', 'dog'] },
audio: { enabled: false, enabled_in_config: false },
record: { enabled: false, enabled_in_config: true },
detect: { width: 1280, height: 720 },
snapshots: {},

@ -8,7 +8,7 @@
<link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon.png" />
<link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32.png" />
<link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16.png" />
<link rel="icon" type="image/svg+xml" href="/images/favicon.svg">
<link rel="icon" type="image/svg+xml" href="/images/favicon.svg" />
<link rel="manifest" href="/site.webmanifest" />
<link rel="mask-icon" href="/images/favicon.svg" color="#3b82f7" />
<meta name="msapplication-TileColor" content="#3b82f7" />

955
web/package-lock.json
generated
File diff suppressed because it is too large
@ -38,8 +38,8 @@
"@testing-library/user-event": "^14.4.3",
"@typescript-eslint/eslint-plugin": "^5.59.1",
"@typescript-eslint/parser": "^5.59.1",
"@vitest/coverage-c8": "^0.31.0",
"@vitest/ui": "^0.31.0",
"@vitest/coverage-v8": "^0.32.2",
"@vitest/ui": "^0.32.2",
"autoprefixer": "^10.4.14",
"eslint": "^8.39.0",
"eslint-config-preact": "^1.3.0",
@ -53,6 +53,6 @@
"tailwindcss": "^3.3.2",
"typescript": "^5.0.4",
"vite": "^4.3.5",
"vitest": "^0.31.0"
"vitest": "^0.32.2"
}
}

@ -113,8 +113,8 @@ describe('WsProvider', () => {
vi.spyOn(Date, 'now').mockReturnValue(123456);
const config = {
cameras: {
front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true } },
side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false } },
front: { name: 'front', detect: { enabled: true }, record: { enabled: false }, snapshots: { enabled: true }, audio: { enabled: false } },
side: { name: 'side', detect: { enabled: false }, record: { enabled: false }, snapshots: { enabled: false }, audio: { enabled: false } },
},
};
render(

@ -41,10 +41,11 @@ export function WsProvider({

useEffect(() => {
Object.keys(config.cameras).forEach((camera) => {
const { name, record, detect, snapshots } = config.cameras[camera];
const { name, record, detect, snapshots, audio } = config.cameras[camera];
dispatch({ topic: `${name}/recordings/state`, payload: record.enabled ? 'ON' : 'OFF', retain: false });
dispatch({ topic: `${name}/detect/state`, payload: detect.enabled ? 'ON' : 'OFF', retain: false });
dispatch({ topic: `${name}/snapshots/state`, payload: snapshots.enabled ? 'ON' : 'OFF', retain: false });
dispatch({ topic: `${name}/audio/state`, payload: audio.enabled ? 'ON' : 'OFF', retain: false });
});
}, [config]);

@ -120,6 +121,15 @@ export function useSnapshotsState(camera) {
return { payload, send, connected };
}

export function useAudioState(camera) {
const {
value: { payload },
send,
connected,
} = useWs(`${camera}/audio/state`, `${camera}/audio/set`);
return { payload, send, connected };
}

export function usePtzCommand(camera) {
const {
value: { payload },

@ -28,13 +28,18 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch

const scaledHeight = useMemo(() => {
const scaledHeight = Math.floor(availableWidth / aspectRatio);
return stretch ? scaledHeight : Math.min(scaledHeight, height);
const finalHeight = stretch ? scaledHeight : Math.min(scaledHeight, height);

if (finalHeight > 0) {
return finalHeight;
}

return 100;
}, [availableWidth, aspectRatio, height, stretch]);
const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
scaledHeight,
aspectRatio,
scrollBarWidth,
]);
const scaledWidth = useMemo(
() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth),
[scaledHeight, aspectRatio, scrollBarWidth]
);

const img = useMemo(() => new Image(), []);
img.onload = useCallback(
@ -58,18 +63,16 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch

return (
<div className="relative w-full" ref={containerRef}>
{
(enabled) ?
<canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
: <div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
}
{
(!hasLoaded && enabled) ? (
<div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
<ActivityIndicator />
</div>
) : null
}
</div >
{enabled ? (
<canvas data-testid="cameraimage-canvas" height={scaledHeight} ref={canvasRef} width={scaledWidth} />
) : (
<div class="text-center pt-6">Camera is disabled in config, no stream or snapshot available!</div>
)}
{!hasLoaded && enabled ? (
<div className="absolute inset-0 flex justify-center" style={`height: ${scaledHeight}px`}>
<ActivityIndicator />
</div>
) : null}
</div>
);
}

@ -25,9 +25,9 @@ const timeAgo = ({ time, currentTime = new Date(), dense = false }: IProp): stri
const elapsedTime: number = currentTime.getTime() - pastTime.getTime();

const timeUnits: TimeUnit[] = [
{ unit: 'ye', full: 'year', value: 31536000 },
{ unit: 'yr', full: 'year', value: 31536000 },
{ unit: 'mo', full: 'month', value: 0 },
{ unit: 'day', full: 'day', value: 86400 },
{ unit: 'd', full: 'day', value: 86400 },
{ unit: 'h', full: 'hour', value: 3600 },
{ unit: 'm', full: 'minute', value: 60 },
{ unit: 's', full: 'second', value: 1 },
@ -58,11 +58,11 @@ const timeAgo = ({ time, currentTime = new Date(), dense = false }: IProp): stri

if (monthDiff > 0) {
const unitAmount = monthDiff;
return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
return `${unitAmount}${dense ? timeUnits[i].unit : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
}
} else if (elapsed >= timeUnits[i].value) {
const unitAmount: number = Math.floor(elapsed / timeUnits[i].value);
return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
return `${unitAmount}${dense ? timeUnits[i].unit : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
}
}
return 'Invalid Time';

65
web/src/components/TimelineEventOverlay.jsx
Normal file
@ -0,0 +1,65 @@
import { Fragment, h } from 'preact';
import { useState } from 'preact/hooks';

export default function TimelineEventOverlay({ eventOverlay, cameraConfig }) {
const boxLeftEdge = Math.round(eventOverlay.data.box[0] * 100);
const boxTopEdge = Math.round(eventOverlay.data.box[1] * 100);
const boxRightEdge = Math.round((1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100);
const boxBottomEdge = Math.round((1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100);

const [isHovering, setIsHovering] = useState(false);
const getHoverStyle = () => {
if (boxLeftEdge < 15) {
// show object stats on right side
return {
left: `${boxLeftEdge + eventOverlay.data.box[2] * 100 + 1}%`,
top: `${boxTopEdge}%`,
};
}

return {
right: `${boxRightEdge + eventOverlay.data.box[2] * 100 + 1}%`,
top: `${boxTopEdge}%`,
};
};

const getObjectArea = () => {
const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
return Math.round(width * height);
};

const getObjectRatio = () => {
const width = eventOverlay.data.box[2] * cameraConfig.detect.width;
const height = eventOverlay.data.box[3] * cameraConfig.detect.height;
return Math.round(100 * (width / height)) / 100;
};

return (
<Fragment>
<div
className="absolute border-4 border-red-600"
onMouseEnter={() => setIsHovering(true)}
onMouseLeave={() => setIsHovering(false)}
onTouchStart={() => setIsHovering(true)}
onTouchEnd={() => setIsHovering(false)}
style={{
left: `${boxLeftEdge}%`,
top: `${boxTopEdge}%`,
right: `${boxRightEdge}%`,
bottom: `${boxBottomEdge}%`,
}}
>
{eventOverlay.class_type == 'entered_zone' ? (
<div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
) : null}
</div>
{isHovering && (
<div className="absolute bg-white dark:bg-slate-800 p-4 block dark:text-white text-lg" style={getHoverStyle()}>
<div>{`Area: ${getObjectArea()} px`}</div>
<div>{`Ratio: ${getObjectRatio()}`}</div>
</div>
)}
</Fragment>
);
}

@ -29,7 +29,7 @@ export default function Tooltip({ relativeTo, text }) {
let newLeft = left - Math.round(tipWidth / 2);
// too far right
if (newLeft + tipWidth + TIP_SPACE > windowWidth - window.scrollX) {
newLeft = left - tipWidth - TIP_SPACE;
newLeft = Math.max(0, left - tipWidth - TIP_SPACE);
newTop = top - Math.round(tipHeight / 2);
}
// too far left

36
web/src/icons/Audio.jsx
Normal file
@ -0,0 +1,36 @@
import { h } from 'preact';
import { memo } from 'preact/compat';

export function Snapshot({ className = 'h-6 w-6', stroke = 'currentColor', onClick = () => {} }) {
return (
<svg
xmlns="http://www.w3.org/2000/svg"
className={className}
fill="none"
viewBox="0 0 32 32"
stroke={stroke}
onClick={onClick}
>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M18 30v-2a10.011 10.011 0 0010-10h2a12.013 12.013 0 01-12 12z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M18 22v-2a2.002 2.002 0 002-2h2a4.004 4.004 0 01-4 4zM10 2a9.01 9.01 0 00-9 9h2a7 7 0 0114 0 7.09 7.09 0 01-3.501 6.135l-.499.288v3.073a2.935 2.935 0 01-.9 2.151 4.182 4.182 0 01-4.633 1.03A4.092 4.092 0 015 20H3a6.116 6.116 0 003.67 5.512 5.782 5.782 0 002.314.486 6.585 6.585 0 004.478-1.888A4.94 4.94 0 0015 20.496v-1.942A9.108 9.108 0 0019 11a9.01 9.01 0 00-9-9z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M9.28 8.082A3.006 3.006 0 0113 11h2a4.979 4.979 0 00-1.884-3.911 5.041 5.041 0 00-4.281-.957 4.95 4.95 0 00-3.703 3.703 5.032 5.032 0 002.304 5.458A3.078 3.078 0 019 17.924V20h2v-2.077a5.06 5.06 0 00-2.537-4.346 3.002 3.002 0 01.817-5.494z"
/>
</svg>
);
}

export default memo(Snapshot);

@ -23,6 +23,7 @@ const emptyObject = Object.freeze({});

export default function Camera({ camera }) {
const { data: config } = useSWR('config');
const { data: trackedLabels } = useSWR(['labels', { camera }]);
const apiHost = useApiHost();
const [showSettings, setShowSettings] = useState(false);
const [viewMode, setViewMode] = useState('live');
@ -124,7 +125,9 @@ export default function Camera({ camera }) {
<div className="max-w-5xl">
<video-stream
mode="mse"
src={new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)}
src={
new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)
}
/>
</div>
</Fragment>
@ -206,7 +209,7 @@ export default function Camera({ camera }) {
<div className="space-y-4">
<Heading size="sm">Tracked objects</Heading>
<div className="flex flex-wrap justify-start">
{cameraConfig.objects.track.map((objectType) => (
{(trackedLabels || []).map((objectType) => (
<Card
className="mb-4 mr-4"
key={objectType}

@ -2,10 +2,11 @@ import { h, Fragment } from 'preact';
import ActivityIndicator from '../components/ActivityIndicator';
import Card from '../components/Card';
import CameraImage from '../components/CameraImage';
import AudioIcon from '../icons/Audio';
import ClipIcon from '../icons/Clip';
import MotionIcon from '../icons/Motion';
import SnapshotIcon from '../icons/Snapshot';
import { useDetectState, useRecordingsState, useSnapshotsState } from '../api/ws';
import { useAudioState, useDetectState, useRecordingsState, useSnapshotsState } from '../api/ws';
import { useMemo } from 'preact/hooks';
import { useViewMode } from '../context'
import { ViewModeTypes } from '../components/ViewOptionEnum';
@ -45,6 +46,7 @@ function Camera({ name, config }) {
const { payload: detectValue, send: sendDetect } = useDetectState(name);
const { payload: recordValue, send: sendRecordings } = useRecordingsState(name);
const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
const { payload: audioValue, send: sendAudio } = useAudioState(name);
const href = `/cameras/${name}`;
const buttons = useMemo(() => {
return [
@ -52,10 +54,9 @@ function Camera({ name, config }) {
{ name: 'Recordings', href: `/recording/${name}` },
];
}, [name]);
const cleanName = useMemo(
() => { return `${name.replaceAll('_', ' ')}` },
[name]
);
const cleanName = useMemo(() => {
return `${name.replaceAll('_', ' ')}`;
}, [name]);
const icons = useMemo(
() => [
{
@ -67,7 +68,9 @@ function Camera({ name, config }) {
},
},
{
name: config.record.enabled_in_config ? `Toggle recordings ${recordValue === 'ON' ? 'off' : 'on'}` : 'Recordings must be enabled in the config to be turned on in the UI.',
name: config.record.enabled_in_config
? `Toggle recordings ${recordValue === 'ON' ? 'off' : 'on'}`
: 'Recordings must be enabled in the config to be turned on in the UI.',
icon: ClipIcon,
color: config.record.enabled_in_config ? (recordValue === 'ON' ? 'blue' : 'gray') : 'red',
onClick: () => {
@ -84,13 +87,29 @@ function Camera({ name, config }) {
sendSnapshots(snapshotValue === 'ON' ? 'OFF' : 'ON', true);
},
},
],
[config, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
config.audio.enabled_in_config
? {
name: `Toggle audio detection ${audioValue === 'ON' ? 'off' : 'on'}`,
icon: AudioIcon,
color: audioValue === 'ON' ? 'blue' : 'gray',
onClick: () => {
sendAudio(audioValue === 'ON' ? 'OFF' : 'ON', true);
},
}
: null,
].filter((button) => button != null),
[config, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
);

const { currentViewMode } = useViewMode();

return (
<Card buttons={buttons} href={href} header={cleanName} icons={!currentViewMode || currentViewMode >= ViewModeTypes["admin"] ? icons : []} media={<CameraImage camera={name} stretch />} />
<Card
buttons={buttons}
href={href}
header={cleanName}
icons={!currentViewMode || currentViewMode >= ViewModeTypes["admin"] ? icons : []}
media={<CameraImage camera={name} stretch />}
/>
);
}

@ -29,6 +29,7 @@ import { formatUnixTimestampToDateTime, getDurationFromTimestamps } from '../uti
import TimeAgo from '../components/TimeAgo';
import Timepicker from '../components/TimePicker';
import TimelineSummary from '../components/TimelineSummary';
import TimelineEventOverlay from '../components/TimelineEventOverlay';
import ViewOption from '../components/ViewOption';

const API_LIMIT = 25;
@ -107,6 +108,7 @@ export default function Events({ path, ...props }) {

const { data: config } = useSWR('config');

const { data: allLabels } = useSWR(['labels']);
const { data: allSubLabels } = useSWR(['sub_labels', { split_joined: 1 }]);

const filterValues = useMemo(
@ -121,15 +123,10 @@ export default function Events({ path, ...props }) {
.filter((value, i, self) => self.indexOf(value) === i),
'None',
],
labels: Object.values(config?.cameras || {})
.reduce((memo, camera) => {
memo = memo.concat(camera?.objects?.track || []);
return memo;
}, config?.objects?.track || [])
.filter((value, i, self) => self.indexOf(value) === i),
labels: Object.values(allLabels || {}),
sub_labels: (allSubLabels || []).length > 0 ? [...Object.values(allSubLabels), 'None'] : [],
}),
[config, allSubLabels]
[config, allLabels, allSubLabels]
);

const onSave = async (e, eventId, save) => {
@ -724,23 +721,10 @@ export default function Events({ path, ...props }) {
}}
>
{eventOverlay ? (
<div
className="absolute border-4 border-red-600"
style={{
left: `${Math.round(eventOverlay.data.box[0] * 100)}%`,
top: `${Math.round(eventOverlay.data.box[1] * 100)}%`,
right: `${Math.round(
(1 - eventOverlay.data.box[2] - eventOverlay.data.box[0]) * 100
)}%`,
bottom: `${Math.round(
(1 - eventOverlay.data.box[3] - eventOverlay.data.box[1]) * 100
)}%`,
}}
>
{eventOverlay.class_type == 'entered_zone' ? (
<div className="absolute w-2 h-2 bg-yellow-500 left-[50%] -translate-x-1/2 translate-y-3/4 bottom-0" />
) : null}
</div>
<TimelineEventOverlay
eventOverlay={eventOverlay}
cameraConfig={config.cameras[event.camera]}
/>
) : null}
</VideoPlayer>
</div>

@ -18,9 +18,9 @@ export default function Export() {
const localISODate = localDate.toISOString().split('T')[0];

const [startDate, setStartDate] = useState(localISODate);
const [startTime, setStartTime] = useState("00:00");
const [startTime, setStartTime] = useState('00:00');
const [endDate, setEndDate] = useState(localISODate);
const [endTime, setEndTime] = useState("23:59");
const [endTime, setEndTime] = useState('23:59');

const onHandleExport = () => {
if (camera == 'select') {
@ -33,8 +33,6 @@ export default function Export() {
return;
}

if (!startDate || !startTime || !endDate || !endTime) {
setMessage({ text: 'A start and end time needs to be selected', error: true });
return;
@ -48,12 +46,13 @@ export default function Export() {
return;
}

axios.post(`export/${camera}/start/${start}/end/${end}`, { playback })
axios
.post(`export/${camera}/start/${start}/end/${end}`, { playback })
.then(() => {
setMessage({ text: 'Successfully started export. View the file in the /exports folder.', error: false });
})
.catch((error) => {
setMessage({ text: 'Failed to start export: '+error.response.data.message, error: true });
setMessage({ text: `Failed to start export: ${error.response.data.message}`, error: true });
});
};

@ -93,13 +92,37 @@ export default function Export() {
<Heading className="py-2" size="sm">
From:
</Heading>
<input className="dark:bg-slate-800" id="startDate" type="date" value={startDate} onChange={(e) => setStartDate(e.target.value)}/>
<input className="dark:bg-slate-800" id="startTime" type="time" value={startTime} onChange={(e) => setStartTime(e.target.value)}/>
<input
className="dark:bg-slate-800"
id="startDate"
type="date"
value={startDate}
onChange={(e) => setStartDate(e.target.value)}
/>
<input
className="dark:bg-slate-800"
id="startTime"
type="time"
value={startTime}
onChange={(e) => setStartTime(e.target.value)}
/>
<Heading className="py-2" size="sm">
To:
</Heading>
<input className="dark:bg-slate-800" id="endDate" type="date" value={endDate} onChange={(e) => setEndDate(e.target.value)}/>
<input className="dark:bg-slate-800" id="endTime" type="time" value={endTime} onChange={(e) => setEndTime(e.target.value)}/>
<input
className="dark:bg-slate-800"
id="endDate"
type="date"
value={endDate}
onChange={(e) => setEndDate(e.target.value)}
/>
<input
className="dark:bg-slate-800"
id="endTime"
type="time"
value={endTime}
onChange={(e) => setEndTime(e.target.value)}
/>
</div>
<Button onClick={() => onHandleExport()}>Submit</Button>
</div>

@ -337,7 +337,7 @@ export default function System() {
<ActivityIndicator />
) : (
<div data-testid="cameras" className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4">
{cameraNames.map((camera) => (
{cameraNames.map((camera) => ( config.cameras[camera]["enabled"] && (
<div key={camera} className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow">
<div className="capitalize text-lg flex justify-between p-4">
<Link href={`/cameras/${camera}`}>{camera.replaceAll('_', ' ')}</Link>
@ -409,7 +409,7 @@ export default function System() {
</Tbody>
</Table>
</div>
</div>
</div> )
))}
</div>
)}