create a simplified motion detector

Blake Blackshear 2023-06-07 06:55:09 -05:00
parent 3c58d44f91
commit e344dea4c3
3 changed files with 238 additions and 17 deletions

benchmark_motion.py (new file)

@@ -0,0 +1,103 @@
import datetime
import multiprocessing as mp
import os
from statistics import mean
import cv2
import numpy as np
from frigate.config import MotionConfig
from frigate.motion.frigate_motion import FrigateMotionDetector
from frigate.motion.improved_motion import ImprovedMotionDetector
# get info on the video
# cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
# cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
cap = cv2.VideoCapture("debug/motion_test_clips/low_contrast_ir.mp4")
# cap = cv2.VideoCapture("airport.mp4")
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
frame_shape = (height, width, 3)
# create the motion config
motion_config = MotionConfig()
motion_config.mask = np.zeros((height, width), np.uint8)
motion_config.mask[:] = 255
motion_config.improve_contrast = 1
motion_config.frame_alpha = 0.02
motion_config.threshold = 40
motion_config.contour_area = 15
save_images = True
# create motion detectors
frigate_motion_detector = FrigateMotionDetector(
    frame_shape=frame_shape,
    config=motion_config,
    fps=fps,
    improve_contrast=mp.Value("i", motion_config.improve_contrast),
    threshold=mp.Value("i", motion_config.threshold),
    contour_area=mp.Value("i", motion_config.contour_area),
)
frigate_motion_detector.save_images = save_images

improved_motion_detector = ImprovedMotionDetector(
    frame_shape=frame_shape,
    config=motion_config,
    fps=fps,
    improve_contrast=mp.Value("i", motion_config.improve_contrast),
    threshold=mp.Value("i", motion_config.threshold),
    contour_area=mp.Value("i", motion_config.contour_area),
)
improved_motion_detector.save_images = save_images
# read and process frames
frame_times = {"frigate": [], "improved": []}
ret, frame = cap.read()
frame_counter = 1
while ret:
    yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)

    # time the existing detector
    start_frame = datetime.datetime.now().timestamp()
    frigate_motion_detector.detect(yuv_frame)
    frame_times["frigate"].append(datetime.datetime.now().timestamp() - start_frame)

    # time the improved detector
    start_frame = datetime.datetime.now().timestamp()
    improved_motion_detector.detect(yuv_frame)
    frame_times["improved"].append(datetime.datetime.now().timestamp() - start_frame)

    frigate_frame = f"debug/frames/frigate-{frame_counter}.jpg"
    improved_frame = f"debug/frames/improved-{frame_counter}.jpg"

    # combine the debug frames written by each detector with the original frame
    if os.path.exists(frigate_frame) and os.path.exists(improved_frame):
        image_row_1 = cv2.hconcat(
            [
                cv2.imread(frigate_frame),
                cv2.imread(improved_frame),
            ]
        )

        image_row_2 = cv2.resize(
            frame,
            dsize=(
                frigate_motion_detector.motion_frame_size[1] * 2,
                frigate_motion_detector.motion_frame_size[0] * 2,
            ),
            interpolation=cv2.INTER_LINEAR,
        )

        cv2.imwrite(
            f"debug/frames/all-{frame_counter}.jpg",
            cv2.vconcat([image_row_1, image_row_2]),
        )
        os.unlink(frigate_frame)
        os.unlink(improved_frame)

    frame_counter += 1
    ret, frame = cap.read()
cap.release()
print("Frigate Motion Detector")
print(f"Average frame processing time: {mean(frame_times['frigate'])*1000:.2f}ms")
print("Improved Motion Detector")
print(f"Average frame processing time: {mean(frame_times['improved'])*1000:.2f}ms")

frigate/motion/frigate_motion.py

@@ -12,9 +12,9 @@ class FrigateMotionDetector(MotionDetector):
         frame_shape,
         config: MotionConfig,
         fps: int,
-        improve_contrast_enabled,
-        motion_threshold,
-        motion_contour_area,
+        improve_contrast,
+        threshold,
+        contour_area,
     ):
         self.config = config
         self.frame_shape = frame_shape
@@ -34,9 +34,9 @@ class FrigateMotionDetector(MotionDetector):
         )
         self.mask = np.where(resized_mask == [0])
         self.save_images = False
-        self.improve_contrast = improve_contrast_enabled
-        self.threshold = motion_threshold
-        self.contour_area = motion_contour_area
+        self.improve_contrast = improve_contrast
+        self.threshold = threshold
+        self.contour_area = contour_area

     def detect(self, frame):
         motion_boxes = []
@@ -132,18 +132,10 @@ class FrigateMotionDetector(MotionDetector):
                         (0, 0, 255),
                         2,
                     )
-            # print("--------")
-            image_row_1 = cv2.hconcat(
-                [
-                    cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
-                    cv2.cvtColor(avg_delta_image, cv2.COLOR_GRAY2BGR),
-                ]
-            )
-            image_row_2 = cv2.hconcat(
-                [cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), thresh_dilated]
-            )
-            combined_image = cv2.vconcat([image_row_1, image_row_2])
-            cv2.imwrite(f"motion/motion-{self.frame_counter}.jpg", combined_image)
+            cv2.imwrite(
+                f"debug/frames/frigate-{self.frame_counter}.jpg", thresh_dilated
+            )

         if len(motion_boxes) > 0:
             self.motion_frame_count += 1
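This hunk also changes the existing detector's debug output from a 2x2 grid under motion/ to a single debug/frames/frigate-{n}.jpg, which is what lets the benchmark above stitch it next to the improved detector's frame. The renamed constructor arguments are the multiprocessing shared values the benchmark passes in, presumably so the settings can change while detection is running. A minimal, hypothetical sketch of that pattern (names chosen for illustration):

import multiprocessing as mp

# the detector reads .value on every frame, e.g.
# cv2.threshold(frameDelta, self.threshold.value, 255, cv2.THRESH_BINARY)
threshold = mp.Value("i", 40)

# updating the shared value elsewhere takes effect on the next detect() call
with threshold.get_lock():
    threshold.value = 30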

frigate/motion/improved_motion.py (new file)

@@ -0,0 +1,126 @@
import cv2
import imutils
import numpy as np
from frigate.config import MotionConfig
from frigate.motion import MotionDetector
class ImprovedMotionDetector(MotionDetector):
    def __init__(
        self,
        frame_shape,
        config: MotionConfig,
        fps: int,
        improve_contrast,
        threshold,
        contour_area,
    ):
        self.config = config
        self.frame_shape = frame_shape
        self.resize_factor = frame_shape[0] / config.frame_height
        self.motion_frame_size = (
            config.frame_height,
            config.frame_height * frame_shape[1] // frame_shape[0],
        )
        self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
        self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
        self.motion_frame_count = 0
        self.frame_counter = 0
        resized_mask = cv2.resize(
            config.mask,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_LINEAR,
        )
        self.mask = np.where(resized_mask == [0])
        self.save_images = False
        self.improve_contrast = improve_contrast
        self.threshold = threshold
        self.contour_area = contour_area
    def detect(self, frame):
        motion_boxes = []

        gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]

        # resize frame
        resized_frame = cv2.resize(
            gray,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_LINEAR,
        )

        resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)

        # Improve contrast
        if self.improve_contrast.value:
            resized_frame = cv2.equalizeHist(resized_frame)

        # mask frame
        resized_frame[self.mask] = [255]

        if self.save_images:
            self.frame_counter += 1

        # compare to average
        frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))

        # compute the threshold image for the current frame
        thresh = cv2.threshold(
            frameDelta, self.threshold.value, 255, cv2.THRESH_BINARY
        )[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh_dilated = cv2.dilate(thresh, None, iterations=1)
        cnts = cv2.findContours(
            thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        cnts = imutils.grab_contours(cnts)

        # loop over the contours
        for c in cnts:
            # if the contour is big enough, count it as motion
            contour_area = cv2.contourArea(c)
            if contour_area > self.contour_area.value:
                x, y, w, h = cv2.boundingRect(c)
                motion_boxes.append(
                    (
                        int(x * self.resize_factor),
                        int(y * self.resize_factor),
                        int((x + w) * self.resize_factor),
                        int((y + h) * self.resize_factor),
                    )
                )

        if self.save_images:
            thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
            for c in cnts:
                contour_area = cv2.contourArea(c)
                if contour_area > self.contour_area.value:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(
                        thresh_dilated,
                        (x, y),
                        (x + w, y + h),
                        (0, 0, 255),
                        2,
                    )
            cv2.imwrite(
                f"debug/frames/improved-{self.frame_counter}.jpg", thresh_dilated
            )

        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            if self.motion_frame_count >= 10:
                # only average in the current frame if the difference persists for a bit
                cv2.accumulateWeighted(
                    resized_frame, self.avg_frame, self.config.frame_alpha
                )
        else:
            # when no motion, just keep averaging the frames together
            cv2.accumulateWeighted(
                resized_frame, self.avg_frame, self.config.frame_alpha
            )
            self.motion_frame_count = 0

        return motion_boxes
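The background model above is a running weighted average: cv2.accumulateWeighted(src, dst, alpha) updates dst in place as dst = (1 - alpha) * dst + alpha * src, so with frame_alpha = 0.02 each new frame contributes 2% of the average. A minimal sketch (not part of the commit) verifying that update against a manual NumPy equivalent:

import cv2
import numpy as np

alpha = 0.02  # same value the benchmark sets for motion_config.frame_alpha

avg = np.full((4, 4), 100, np.float32)
frame = np.full((4, 4), 255, np.uint8)

cv2.accumulateWeighted(frame, avg, alpha)

# equivalent manual update: (1 - alpha) * old_avg + alpha * new_frame
expected = (1 - alpha) * 100 + alpha * 255
assert np.allclose(avg, expected)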