lightning detection

This commit is contained in:
Blake Blackshear 2023-06-08 07:28:58 -05:00
parent e344dea4c3
commit a1dce08bfc
4 changed files with 40 additions and 16 deletions

View File

@@ -13,7 +13,7 @@ from frigate.motion.improved_motion import ImprovedMotionDetector
# get info on the video # get info on the video
# cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4") # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
# cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4") # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
cap = cv2.VideoCapture("debug/motion_test_clips/low_contrast_ir.mp4") cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
# cap = cv2.VideoCapture("airport.mp4") # cap = cv2.VideoCapture("airport.mp4")
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

View File

@@ -262,6 +262,12 @@ motion:
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
# The value should be between 1 and 255. # The value should be between 1 and 255.
threshold: 25 threshold: 25
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
# needs to recalibrate. (default: shown below)
# Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion.
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
# a doorbell camera.
lightning_threshold: 0.8
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: 30) # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: 30)
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
# make motion detection more sensitive to smaller moving objects. # make motion detection more sensitive to smaller moving objects.

View File

@@ -13,9 +13,11 @@ from pydantic.fields import PrivateAttr
from frigate.const import CACHE_DIR, DEFAULT_DB_PATH, REGEX_CAMERA_NAME, YAML_EXT from frigate.const import CACHE_DIR, DEFAULT_DB_PATH, REGEX_CAMERA_NAME, YAML_EXT
from frigate.detectors import DetectorConfig, ModelConfig from frigate.detectors import DetectorConfig, ModelConfig
from frigate.detectors.detector_config import InputTensorEnum # noqa: F401 from frigate.detectors.detector_config import (
from frigate.detectors.detector_config import PixelFormatEnum # noqa: F401 BaseDetectorConfig,
from frigate.detectors.detector_config import BaseDetectorConfig InputTensorEnum, # noqa: F401
PixelFormatEnum, # noqa: F401
)
from frigate.ffmpeg_presets import ( from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_decode, parse_preset_hardware_acceleration_decode,
parse_preset_hardware_acceleration_scale, parse_preset_hardware_acceleration_scale,
@@ -194,6 +196,9 @@ class MotionConfig(FrigateBaseModel):
ge=1, ge=1,
le=255, le=255,
) )
lightning_threshold: float = Field(
default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
)
improve_contrast: bool = Field(default=False, title="Improve Contrast") improve_contrast: bool = Field(default=False, title="Improve Contrast")
contour_area: Optional[int] = Field(default=30, title="Contour Area") contour_area: Optional[int] = Field(default=30, title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha") delta_alpha: float = Field(default=0.2, title="Delta Alpha")

View File

@@ -34,6 +34,7 @@ class ImprovedMotionDetector(MotionDetector):
) )
self.mask = np.where(resized_mask == [0]) self.mask = np.where(resized_mask == [0])
self.save_images = False self.save_images = False
self.calibrating = True
self.improve_contrast = improve_contrast self.improve_contrast = improve_contrast
self.threshold = threshold self.threshold = threshold
self.contour_area = contour_area self.contour_area = contour_area
@@ -59,7 +60,7 @@ class ImprovedMotionDetector(MotionDetector):
# mask frame # mask frame
resized_frame[self.mask] = [255] resized_frame[self.mask] = [255]
if self.save_images: if self.save_images or self.calibrating:
self.frame_counter += 1 self.frame_counter += 1
# compare to average # compare to average
frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame)) frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
@@ -78,9 +79,11 @@ class ImprovedMotionDetector(MotionDetector):
cnts = imutils.grab_contours(cnts) cnts = imutils.grab_contours(cnts)
# loop over the contours # loop over the contours
total_contour_area = 0
for c in cnts: for c in cnts:
# if the contour is big enough, count it as motion # if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c) contour_area = cv2.contourArea(c)
total_contour_area += contour_area
if contour_area > self.contour_area.value: if contour_area > self.contour_area.value:
x, y, w, h = cv2.boundingRect(c) x, y, w, h = cv2.boundingRect(c)
motion_boxes.append( motion_boxes.append(
@@ -92,16 +95,26 @@
) )
) )
pct_motion = total_contour_area / (
self.motion_frame_size[0] * self.motion_frame_size[1]
)
# once the motion drops to less than 1% for the first time, assume it's calibrated
if pct_motion < 0.01:
self.calibrating = False
# if calibrating or the motion contours are > 80% of the image area (lightning, ir, ptz) recalibrate
if self.calibrating or pct_motion > self.config.lightning_threshold:
motion_boxes = []
self.calibrating = True
if self.save_images: if self.save_images:
thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR) thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
for c in cnts: for b in motion_boxes:
contour_area = cv2.contourArea(c)
if contour_area > self.contour_area.value:
x, y, w, h = cv2.boundingRect(c)
cv2.rectangle( cv2.rectangle(
thresh_dilated, thresh_dilated,
(x, y), (int(b[0] / self.resize_factor), int(b[1] / self.resize_factor)),
(x + w, y + h), (int(b[2] / self.resize_factor), int(b[3] / self.resize_factor)),
(0, 0, 255), (0, 0, 255),
2, 2,
) )