Merge branch 'dev' of github.com:blakeblackshear/frigate into audio-events

Nick Mowen 2023-06-28 06:22:31 -06:00
commit 46e1355586
20 changed files with 178 additions and 110 deletions

View File

@@ -129,7 +129,9 @@ RUN apt-get -qq update \
libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# scipy dependencies
gcc gfortran libopenblas-dev liblapack-dev && \
gcc gfortran libopenblas-dev liblapack-dev \
# faster-fifo dependencies
g++ cython3 && \
rm -rf /var/lib/apt/lists/*
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \

View File

@@ -1,14 +1,13 @@
import datetime
import multiprocessing as mp
import os
from statistics import mean
import cv2
import numpy as np
from frigate.config import MotionConfig
from frigate.motion.frigate_motion import FrigateMotionDetector
from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.util import create_mask
# get info on the video
# cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
@@ -20,84 +19,85 @@ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
frame_shape = (height, width, 3)
mask = create_mask(
(height, width),
[],
)
# create the motion config
motion_config = MotionConfig()
motion_config.mask = np.zeros((height, width), np.uint8)
motion_config.mask[:] = 255
motion_config.improve_contrast = 1
motion_config.frame_alpha = 0.02
motion_config.threshold = 40
motion_config.contour_area = 15
motion_config_1 = MotionConfig()
motion_config_1.mask = np.zeros((height, width), np.uint8)
motion_config_1.mask[:] = mask
# motion_config_1.improve_contrast = 1
# motion_config_1.frame_height = 150
# motion_config_1.frame_alpha = 0.02
# motion_config_1.threshold = 30
# motion_config_1.contour_area = 10
motion_config_2 = MotionConfig()
motion_config_2.mask = np.zeros((height, width), np.uint8)
motion_config_2.mask[:] = mask
# motion_config_2.improve_contrast = 1
# motion_config_2.frame_height = 150
# motion_config_2.frame_alpha = 0.01
# motion_config_2.threshold = 20
# motion_config_2.contour_area = 10
save_images = True
# create motion detectors
frigate_motion_detector = FrigateMotionDetector(
improved_motion_detector_1 = ImprovedMotionDetector(
frame_shape=frame_shape,
config=motion_config,
config=motion_config_1,
fps=fps,
improve_contrast=mp.Value("i", motion_config.improve_contrast),
threshold=mp.Value("i", motion_config.threshold),
contour_area=mp.Value("i", motion_config.contour_area),
improve_contrast=mp.Value("i", motion_config_1.improve_contrast),
threshold=mp.Value("i", motion_config_1.threshold),
contour_area=mp.Value("i", motion_config_1.contour_area),
name="default",
clipLimit=2.0,
tileGridSize=(8, 8),
)
frigate_motion_detector.save_images = save_images
improved_motion_detector_1.save_images = save_images
improved_motion_detector = ImprovedMotionDetector(
improved_motion_detector_2 = ImprovedMotionDetector(
frame_shape=frame_shape,
config=motion_config,
config=motion_config_2,
fps=fps,
improve_contrast=mp.Value("i", motion_config.improve_contrast),
threshold=mp.Value("i", motion_config.threshold),
contour_area=mp.Value("i", motion_config.contour_area),
improve_contrast=mp.Value("i", motion_config_2.improve_contrast),
threshold=mp.Value("i", motion_config_2.threshold),
contour_area=mp.Value("i", motion_config_2.contour_area),
name="compare",
)
improved_motion_detector.save_images = save_images
improved_motion_detector_2.save_images = save_images
# read and process frames
frame_times = {"frigate": [], "improved": []}
ret, frame = cap.read()
frame_counter = 1
while ret:
yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
start_frame = datetime.datetime.now().timestamp()
frigate_motion_detector.detect(yuv_frame)
frame_times["frigate"].append(datetime.datetime.now().timestamp() - start_frame)
improved_motion_detector_1.detect(yuv_frame)
start_frame = datetime.datetime.now().timestamp()
improved_motion_detector.detect(yuv_frame)
frame_times["improved"].append(datetime.datetime.now().timestamp() - start_frame)
improved_motion_detector_2.detect(yuv_frame)
frigate_frame = f"debug/frames/frigate-{frame_counter}.jpg"
improved_frame = f"debug/frames/improved-{frame_counter}.jpg"
if os.path.exists(frigate_frame) and os.path.exists(improved_frame):
image_row_1 = cv2.hconcat(
[
cv2.imread(frigate_frame),
cv2.imread(improved_frame),
]
)
image_row_2 = cv2.resize(
frame,
dsize=(
frigate_motion_detector.motion_frame_size[1] * 2,
frigate_motion_detector.motion_frame_size[0] * 2,
),
interpolation=cv2.INTER_LINEAR,
)
default_frame = f"debug/frames/default-{frame_counter}.jpg"
compare_frame = f"debug/frames/compare-{frame_counter}.jpg"
if os.path.exists(default_frame) and os.path.exists(compare_frame):
images = [
cv2.imread(default_frame),
cv2.imread(compare_frame),
]
cv2.imwrite(
f"debug/frames/all-{frame_counter}.jpg",
cv2.vconcat([image_row_1, image_row_2]),
cv2.vconcat(images)
if frame_shape[0] > frame_shape[1]
else cv2.hconcat(images),
)
os.unlink(frigate_frame)
os.unlink(improved_frame)
os.unlink(default_frame)
os.unlink(compare_frame)
frame_counter += 1
ret, frame = cap.read()
cap.release()
print("Frigate Motion Detector")
print(f"Average frame processing time: {mean(frame_times['frigate'])*1000:.2f}ms")
print("Improved Motion Detector")
print(f"Average frame processing time: {mean(frame_times['improved'])*1000:.2f}ms")

View File

@@ -275,7 +275,7 @@ motion:
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255.
threshold: 40
threshold: 20
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion.
@@ -286,19 +286,19 @@ motion:
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects.
# As a rule of thumb:
# - 15 - high sensitivity
# - 10 - high sensitivity
# - 30 - medium sensitivity
# - 50 - low sensitivity
contour_area: 15
contour_area: 10
# Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
# Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
# Low values will cause things like moving shadows to be detected as motion for longer.
# https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
frame_alpha: 0.02
frame_alpha: 0.01
# Optional: Height of the resized motion frame (default: 50)
# Higher values will result in more granular motion detection at the expense of higher CPU usage.
# Lower values result in less CPU, but small changes may not register as motion.
frame_height: 50
frame_height: 100
# Optional: motion mask
# NOTE: see docs for more detailed info on creating masks
mask: 0,900,1080,900,1080,1920,0,1920
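Editor's note: to make the relationship between the retuned threshold and contour_area defaults concrete, here is a minimal standalone sketch of the underlying OpenCV calls — not Frigate's actual pipeline, and the frame_delta input is a fabricated stand-in for the real frame/background difference:

```python
import cv2
import numpy as np

# Hypothetical per-pixel difference between the current frame and the
# running-average background (values 0-255).
frame_delta = np.random.randint(0, 60, (100, 150), dtype=np.uint8)

threshold = 20      # new default: differences above this count as motion
contour_area = 10   # new default: contours smaller than this are ignored

_, thresh = cv2.threshold(frame_delta, threshold, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
motion_boxes = [
    cv2.boundingRect(c) for c in contours if cv2.contourArea(c) >= contour_area
]
```

Lowering both defaults (threshold 40 to 20, contour_area 15 to 10) makes smaller and fainter changes register as motion, which is why the docs' sensitivity rule of thumb is updated in the same hunk.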

View File

@@ -6,12 +6,12 @@ import shutil
import signal
import sys
import traceback
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent
from types import FrameType
from typing import Optional
import psutil
from faster_fifo import Queue
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
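Editor's note: this commit swaps multiprocessing's Queue for faster-fifo's Queue across the backend (with the matching g++/cython3 build deps added in the Dockerfile and `faster-fifo == 1.4.*` in requirements). A minimal usage sketch, assuming a buffer size chosen here purely for illustration:

```python
from faster_fifo import Queue

# faster-fifo stores queued items in a shared-memory circular buffer and
# supports batched get_many()/put_many(), reducing per-item overhead versus
# the pipe-based multiprocessing queue it replaces.
frame_queue = Queue(max_size_bytes=1_000_000)  # ~1 MB buffer (illustrative)

frame_queue.put(("front_cam", 1687945351.0))
camera, frame_time = frame_queue.get(timeout=1.0)
```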

View File

@@ -188,7 +188,7 @@ class RecordConfig(FrigateBaseModel):
class MotionConfig(FrigateBaseModel):
threshold: int = Field(
default=40,
default=20,
title="Motion detection threshold (1-255).",
ge=1,
le=255,
@@ -197,10 +197,10 @@ class MotionConfig(FrigateBaseModel):
default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
)
improve_contrast: bool = Field(default=True, title="Improve Contrast")
contour_area: Optional[int] = Field(default=15, title="Contour Area")
contour_area: Optional[int] = Field(default=10, title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha")
frame_alpha: float = Field(default=0.02, title="Frame Alpha")
frame_height: Optional[int] = Field(default=50, title="Frame Height")
frame_alpha: float = Field(default=0.01, title="Frame Alpha")
frame_height: Optional[int] = Field(default=100, title="Frame Height")
mask: Union[str, List[str]] = Field(
default="", title="Coordinates polygon for the motion mask."
)
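Editor's note: with the new field defaults above, an omitted motion block now resolves to the more sensitive values. A quick check, assuming a working Frigate dev environment:

```python
from frigate.config import MotionConfig

mc = MotionConfig()
assert mc.threshold == 20       # was 40
assert mc.contour_area == 10    # was 15
assert mc.frame_alpha == 0.01   # was 0.02
assert mc.frame_height == 100   # was 50
```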

View File

@@ -13,6 +13,16 @@ PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"
BTBN_PATH = "/usr/lib/btbn-ffmpeg"
# Attributes
ATTRIBUTE_LABEL_MAP = {
"person": ["face", "amazon"],
"car": ["ups", "fedex", "amazon", "license_plate"],
}
ALL_ATTRIBUTE_LABELS = [
item for sublist in ATTRIBUTE_LABEL_MAP.values() for item in sublist
]
# Audio Consts
AUDIO_DURATION = 0.975
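Editor's note: these constants replace the attribute map that was previously built inline in process_frames (see the video.py hunk further down). The flattened list exists for simple membership checks; a small sketch of how it behaves:

```python
ATTRIBUTE_LABEL_MAP = {
    "person": ["face", "amazon"],
    "car": ["ups", "fedex", "amazon", "license_plate"],
}
ALL_ATTRIBUTE_LABELS = [
    item for sublist in ATTRIBUTE_LABEL_MAP.values() for item in sublist
]

# Attribute labels are filtered out before object tracking. "amazon" appears
# twice in the flattened list, which is harmless for the `not in` test used
# in video.py.
print("face" in ALL_ATTRIBUTE_LABELS)    # True
print("person" in ALL_ATTRIBUTE_LABELS)  # False -- "person" is a tracked label
```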

View File

@@ -6,10 +6,10 @@ import logging
import os
import random
import string
from multiprocessing.queues import Queue
from typing import Optional
import cv2
from faster_fifo import Queue
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR

View File

@@ -3,10 +3,11 @@ import logging
import queue
import threading
from enum import Enum
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent
from typing import Dict
from faster_fifo import Queue
from frigate.config import EventsConfig, FrigateConfig
from frigate.models import Event
from frigate.types import CameraMetricsTypes

View File

@@ -410,6 +410,24 @@ def set_sub_label(id):
)
@bp.route("/labels")
def get_labels():
camera = request.args.get("camera", type=str, default="")
try:
if camera:
events = Event.select(Event.label).where(Event.camera == camera).distinct()
else:
events = Event.select(Event.label).distinct()
except Exception as e:
return jsonify(
{"success": False, "message": f"Failed to get labels: {e}"}, "404"
)
labels = sorted([e.label for e in events])
return jsonify(labels)
@bp.route("/sub_labels")
def get_sub_labels():
split_joined = request.args.get("split_joined", type=int)
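Editor's note: the new /labels route above returns the distinct event labels, optionally scoped to one camera, and the Camera page now uses it to list tracked objects. A hedged usage sketch — the host, port, /api prefix, and camera name here are assumptions based on how the frontend queries the API:

```python
import requests

BASE = "http://frigate.local:5000/api"

# All distinct labels across stored events
all_labels = requests.get(f"{BASE}/labels").json()

# Labels seen on a single camera only
front_labels = requests.get(f"{BASE}/labels", params={"camera": "front"}).json()

print(all_labels, front_labels)
```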

View File

@@ -7,10 +7,10 @@ import signal
import threading
from collections import deque
from logging import handlers
from multiprocessing.queues import Queue
from types import FrameType
from typing import Deque, Optional
from faster_fifo import Queue
from setproctitle import setproctitle
from frigate.util import clean_camera_user_pass

View File

@@ -15,7 +15,11 @@ class ImprovedMotionDetector(MotionDetector):
improve_contrast,
threshold,
contour_area,
clipLimit=2.0,
tileGridSize=(2, 2),
name="improved",
):
self.name = name
self.config = config
self.frame_shape = frame_shape
self.resize_factor = frame_shape[0] / config.frame_height
@@ -38,6 +42,7 @@ class ImprovedMotionDetector(MotionDetector):
self.improve_contrast = improve_contrast
self.threshold = threshold
self.contour_area = contour_area
self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
def detect(self, frame):
motion_boxes = []
@@ -51,11 +56,20 @@ class ImprovedMotionDetector(MotionDetector):
interpolation=cv2.INTER_LINEAR,
)
if self.save_images:
resized_saved = resized_frame.copy()
resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
if self.save_images:
blurred_saved = resized_frame.copy()
# Improve contrast
if self.improve_contrast.value:
resized_frame = cv2.equalizeHist(resized_frame)
resized_frame = self.clahe.apply(resized_frame)
if self.save_images:
contrasted_saved = resized_frame.copy()
# mask frame
resized_frame[self.mask] = [255]
@@ -118,8 +132,19 @@ class ImprovedMotionDetector(MotionDetector):
(0, 0, 255),
2,
)
frames = [
cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
thresh_dilated,
]
cv2.imwrite(
f"debug/frames/improved-{self.frame_counter}.jpg", thresh_dilated
f"debug/frames/{self.name}-{self.frame_counter}.jpg",
cv2.hconcat(frames)
if self.frame_shape[0] > self.frame_shape[1]
else cv2.vconcat(frames),
)
if len(motion_boxes) > 0:
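Editor's note: the contrast step above swaps global histogram equalization for CLAHE, which equalizes small tiles independently and clips the histogram to limit noise amplification; clipLimit and tileGridSize are now constructor parameters so the benchmark can compare values. A minimal sketch of the swap on a grayscale frame (the random frame is a stand-in):

```python
import cv2
import numpy as np

gray = np.random.randint(0, 255, (180, 320), dtype=np.uint8)  # stand-in frame

# Previous behavior: one global histogram for the whole frame
global_eq = cv2.equalizeHist(gray)

# New behavior: contrast-limited adaptive equalization; 2.0 / (8, 8) mirrors
# the values the benchmark script passes in above (the detector default in
# this diff is a (2, 2) tile grid)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
local_eq = clahe.apply(gray)
```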

View File

@@ -730,7 +730,7 @@ class TestConfig(unittest.TestCase):
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert runtime_config.cameras["back"].motion.frame_height == 50
assert runtime_config.cameras["back"].motion.frame_height == 100
def test_motion_contour_area_dynamic(self):
config = {
@@ -758,7 +758,7 @@ class TestConfig(unittest.TestCase):
assert config == frigate_config.dict(exclude_unset=True)
runtime_config = frigate_config.runtime_config()
assert round(runtime_config.cameras["back"].motion.contour_area) == 15
assert round(runtime_config.cameras["back"].motion.contour_area) == 10
def test_merge_labelmap(self):
config = {

View File

@@ -3,9 +3,10 @@
import logging
import queue
import threading
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent
from faster_fifo import Queue
from frigate.config import FrigateConfig
from frigate.events.maintainer import EventTypeEnum
from frigate.models import Timeline

View File

@@ -1,8 +1,9 @@
from multiprocessing.context import Process
from multiprocessing.queues import Queue
from multiprocessing.sharedctypes import Synchronized
from typing import Optional, TypedDict
from faster_fifo import Queue
from frigate.object_detection import ObjectDetectProcess

View File

@@ -650,34 +650,42 @@ def restart_frigate():
class EventsPerSecond:
def __init__(self, max_events=1000):
def __init__(self, max_events=1000, last_n_seconds=10):
self._start = None
self._max_events = max_events
self._last_n_seconds = last_n_seconds
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
def update(self):
now = datetime.datetime.now().timestamp()
if self._start is None:
self.start()
self._timestamps.append(datetime.datetime.now().timestamp())
self._start = now
self._timestamps.append(now)
# truncate the list when it goes 100 over the max_size
if len(self._timestamps) > self._max_events + 100:
self._timestamps = self._timestamps[(1 - self._max_events) :]
self.expire_timestamps(now)
def eps(self, last_n_seconds=10):
if self._start is None:
self.start()
# compute the (approximate) events in the last n seconds
def eps(self):
now = datetime.datetime.now().timestamp()
seconds = min(now - self._start, last_n_seconds)
if self._start is None:
self._start = now
# compute the (approximate) events in the last n seconds
self.expire_timestamps(now)
seconds = min(now - self._start, self._last_n_seconds)
# avoid divide by zero
if seconds == 0:
seconds = 1
return (
len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
)
return len(self._timestamps) / seconds
# remove aged out timestamps
def expire_timestamps(self, now):
threshold = now - self._last_n_seconds
while self._timestamps and self._timestamps[0] < threshold:
del self._timestamps[0]
def print_stack(sig, frame):
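Editor's note: EventsPerSecond now takes its window at construction time, prunes stale timestamps on every update(), and eps() no longer accepts an argument. A small usage sketch, assuming a working Frigate dev environment:

```python
from frigate.util import EventsPerSecond

frame_rate = EventsPerSecond(max_events=1000, last_n_seconds=10)
frame_rate.start()

for _ in range(30):
    frame_rate.update()  # call once per processed frame/event

# Rate over (at most) the last 10 seconds; aged-out timestamps are expired
print(f"{frame_rate.eps():.1f} events/sec")
```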

View File

@@ -15,7 +15,7 @@ import numpy as np
from setproctitle import setproctitle
from frigate.config import CameraConfig, DetectConfig
from frigate.const import CACHE_DIR
from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
from frigate.detectors.detector_config import PixelFormatEnum
from frigate.log import LogPipe
from frigate.motion import MotionDetector
@@ -172,7 +172,7 @@ def capture_frames(
skipped_eps.start()
while True:
fps.value = frame_rate.eps()
skipped_eps.eps()
skipped_fps.value = skipped_eps.eps()
current_frame.value = datetime.datetime.now().timestamp()
frame_name = f"{camera_name}{current_frame.value}"
@@ -215,6 +215,7 @@ class CameraWatchdog(threading.Thread):
config: CameraConfig,
frame_queue,
camera_fps,
skipped_fps,
ffmpeg_pid,
stop_event,
):
@@ -227,6 +228,7 @@ class CameraWatchdog(threading.Thread):
self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")
self.ffmpeg_other_processes: list[dict[str, any]] = []
self.camera_fps = camera_fps
self.skipped_fps = skipped_fps
self.ffmpeg_pid = ffmpeg_pid
self.frame_queue = frame_queue
self.frame_shape = self.config.frame_shape_yuv
@@ -346,6 +348,7 @@ class CameraWatchdog(threading.Thread):
self.frame_shape,
self.frame_queue,
self.camera_fps,
self.skipped_fps,
self.stop_event,
)
self.capture_thread.start()
@@ -376,7 +379,14 @@ class CameraWatchdog(threading.Thread):
class CameraCapture(threading.Thread):
def __init__(
self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
self,
camera_name,
ffmpeg_process,
frame_shape,
frame_queue,
fps,
skipped_fps,
stop_event,
):
threading.Thread.__init__(self)
self.name = f"capture:{camera_name}"
@@ -385,14 +395,13 @@ class CameraCapture(threading.Thread):
self.frame_queue = frame_queue
self.fps = fps
self.stop_event = stop_event
self.skipped_fps = EventsPerSecond()
self.skipped_fps = skipped_fps
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
self.current_frame = mp.Value("d", 0.0)
self.last_frame = 0
def run(self):
self.skipped_fps.start()
capture_frames(
self.ffmpeg_process,
self.camera_name,
@@ -424,6 +433,7 @@ def capture_camera(name, config: CameraConfig, process_info):
config,
frame_queue,
process_info["camera_fps"],
process_info["skipped_fps"],
process_info["ffmpeg_pid"],
stop_event,
)
@@ -723,14 +733,6 @@ def process_frames(
stop_event,
exit_on_empty: bool = False,
):
# attribute labels are not tracked and are not assigned regions
attribute_label_map = {
"person": ["face", "amazon"],
"car": ["ups", "fedex", "amazon", "license_plate"],
}
all_attribute_labels = [
item for sublist in attribute_label_map.values() for item in sublist
]
fps = process_info["process_fps"]
detection_fps = process_info["detection_fps"]
current_frame_time = process_info["detection_frame"]
@@ -906,7 +908,7 @@ def process_frames(
tracked_detections = [
d
for d in consolidated_detections
if d[0] not in all_attribute_labels
if d[0] not in ALL_ATTRIBUTE_LABELS
]
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(frame_time, tracked_detections)
@@ -916,7 +918,7 @@ def process_frames(
# group the attribute detections based on what label they apply to
attribute_detections = {}
for label, attribute_labels in attribute_label_map.items():
for label, attribute_labels in ATTRIBUTE_LABEL_MAP.items():
attribute_detections[label] = [
d for d in consolidated_detections if d[0] in attribute_labels
]
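Editor's note: the capture plumbing change above means skipped_fps is now a shared value created alongside camera_fps in process_info and handed down through CameraWatchdog into CameraCapture, instead of a throwaway counter created inside the capture thread; capture_frames also now publishes the skipped rate rather than discarding the result of eps(). A simplified sketch of that flow — the names mirror the diff, but the structure is illustrative, not Frigate's actual code:

```python
import multiprocessing as mp

# Shared per-camera metrics created by the main process (illustrative subset)
process_info = {
    "camera_fps": mp.Value("d", 0.0),
    "skipped_fps": mp.Value("d", 0.0),  # newly passed down to the capture thread
}

def capture_loop(frame_rate, skipped_eps, camera_fps, skipped_fps):
    # Each iteration publishes both rates to the shared values; previously the
    # skipped rate was computed and thrown away.
    camera_fps.value = frame_rate.eps()
    skipped_fps.value = skipped_eps.eps()
```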

View File

@@ -1,5 +1,6 @@
click == 8.1.*
Flask == 2.3.*
faster-fifo == 1.4.*
imutils == 0.5.*
matplotlib == 3.7.*
mypy == 0.942

View File

@@ -29,7 +29,7 @@ export default function Tooltip({ relativeTo, text }) {
let newLeft = left - Math.round(tipWidth / 2);
// too far right
if (newLeft + tipWidth + TIP_SPACE > windowWidth - window.scrollX) {
newLeft = left - tipWidth - TIP_SPACE;
newLeft = Math.max(0, left - tipWidth - TIP_SPACE);
newTop = top - Math.round(tipHeight / 2);
}
// too far left

View File

@@ -22,6 +22,7 @@ const emptyObject = Object.freeze({});
export default function Camera({ camera }) {
const { data: config } = useSWR('config');
const { data: trackedLabels } = useSWR(['labels', { camera }]);
const apiHost = useApiHost();
const [showSettings, setShowSettings] = useState(false);
const [viewMode, setViewMode] = useState('live');
@@ -121,7 +122,9 @@ export default function Camera({ camera }) {
<div className="max-w-5xl">
<video-stream
mode="mse"
src={new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)}
src={
new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)
}
/>
</div>
</Fragment>
@@ -203,7 +206,7 @@ export default function Camera({ camera }) {
<div className="space-y-4">
<Heading size="sm">Tracked objects</Heading>
<div className="flex flex-wrap justify-start">
{cameraConfig.objects.track.map((objectType) => (
{(trackedLabels || []).map((objectType) => (
<Card
className="mb-4 mr-4"
key={objectType}

View File

@@ -106,6 +106,7 @@ export default function Events({ path, ...props }) {
const { data: config } = useSWR('config');
const { data: allLabels } = useSWR(['labels']);
const { data: allSubLabels } = useSWR(['sub_labels', { split_joined: 1 }]);
const filterValues = useMemo(
@@ -120,15 +121,10 @@ export default function Events({ path, ...props }) {
.filter((value, i, self) => self.indexOf(value) === i),
'None',
],
labels: Object.values(config?.cameras || {})
.reduce((memo, camera) => {
memo = memo.concat(camera?.objects?.track || []);
return memo;
}, config?.objects?.track || [])
.filter((value, i, self) => self.indexOf(value) === i),
labels: Object.values(allLabels || {}),
sub_labels: (allSubLabels || []).length > 0 ? [...Object.values(allSubLabels), 'None'] : [],
}),
[config, allSubLabels]
[config, allLabels, allSubLabels]
);
const onSave = async (e, eventId, save) => {