diff --git a/benchmark_motion.py b/benchmark_motion.py
index c0dcc89c3..a9ecc56b2 100644
--- a/benchmark_motion.py
+++ b/benchmark_motion.py
@@ -13,7 +13,7 @@ from frigate.motion.improved_motion import ImprovedMotionDetector
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
 # cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4")
-cap = cv2.VideoCapture("debug/motion_test_clips/low_contrast_ir.mp4")
+cap = cv2.VideoCapture("debug/motion_test_clips/ir_off.mp4")
 # cap = cv2.VideoCapture("airport.mp4")
 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md
index 980d14465..4738d9236 100644
--- a/docs/docs/configuration/index.md
+++ b/docs/docs/configuration/index.md
@@ -262,6 +262,12 @@ motion:
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
   threshold: 25
+  # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
+  # needs to recalibrate. (default: shown below)
+  # Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion.
+  # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
+  # a doorbell camera.
+  lightning_threshold: 0.8
   # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: 30)
   # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
   # make motion detection more sensitive to smaller moving objects.
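To make the new knob concrete, here is a hedged usage sketch of the option in a user config; the 0.7 value is illustrative, not a recommendation from this patch:

```yaml
motion:
  threshold: 25
  # treat any frame where motion contours cover more than 70% of the image
  # as a lighting/scene change and recalibrate instead of reporting motion
  lightning_threshold: 0.7
```

Lowering the value below the 0.8 default makes the detector more aggressive about discarding full-frame changes, at the cost of occasionally ignoring a subject that fills most of a doorbell camera's view.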
diff --git a/frigate/config.py b/frigate/config.py
index 156a755c5..213eeddff 100644
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -13,9 +13,11 @@ from pydantic.fields import PrivateAttr
 
 from frigate.const import CACHE_DIR, DEFAULT_DB_PATH, REGEX_CAMERA_NAME, YAML_EXT
 from frigate.detectors import DetectorConfig, ModelConfig
-from frigate.detectors.detector_config import InputTensorEnum  # noqa: F401
-from frigate.detectors.detector_config import PixelFormatEnum  # noqa: F401
-from frigate.detectors.detector_config import BaseDetectorConfig
+from frigate.detectors.detector_config import (
+    BaseDetectorConfig,
+    InputTensorEnum,  # noqa: F401
+    PixelFormatEnum,  # noqa: F401
+)
 from frigate.ffmpeg_presets import (
     parse_preset_hardware_acceleration_decode,
     parse_preset_hardware_acceleration_scale,
@@ -194,6 +196,9 @@ class MotionConfig(FrigateBaseModel):
         ge=1,
         le=255,
     )
+    lightning_threshold: float = Field(
+        default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
+    )
     improve_contrast: bool = Field(default=False, title="Improve Contrast")
     contour_area: Optional[int] = Field(default=30, title="Contour Area")
     delta_alpha: float = Field(default=0.2, title="Delta Alpha")
diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py
index 568aef534..4e68720ba 100644
--- a/frigate/motion/improved_motion.py
+++ b/frigate/motion/improved_motion.py
@@ -34,6 +34,7 @@ class ImprovedMotionDetector(MotionDetector):
         )
         self.mask = np.where(resized_mask == [0])
         self.save_images = False
+        self.calibrating = True
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
@@ -59,7 +60,7 @@
         # mask frame
         resized_frame[self.mask] = [255]
 
-        if self.save_images:
+        if self.save_images or self.calibrating:
             self.frame_counter += 1
         # compare to average
         frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
@@ -78,9 +79,11 @@
         cnts = imutils.grab_contours(cnts)
 
         # loop over the contours
+        total_contour_area = 0
         for c in cnts:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
+            total_contour_area += contour_area
             if contour_area > self.contour_area.value:
                 x, y, w, h = cv2.boundingRect(c)
                 motion_boxes.append(
@@ -92,19 +95,29 @@
                 )
             )
 
+        pct_motion = total_contour_area / (
+            self.motion_frame_size[0] * self.motion_frame_size[1]
+        )
+
+        # once the motion drops to less than 1% for the first time, assume it's calibrated
+        if pct_motion < 0.01:
+            self.calibrating = False
+
+        # if calibrating or the motion contours are > 80% of the image area (lightning, IR, PTZ), recalibrate
+        if self.calibrating or pct_motion > self.config.lightning_threshold:
+            motion_boxes = []
+            self.calibrating = True
+
         if self.save_images:
             thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
-            for c in cnts:
-                contour_area = cv2.contourArea(c)
-                if contour_area > self.contour_area.value:
-                    x, y, w, h = cv2.boundingRect(c)
-                    cv2.rectangle(
-                        thresh_dilated,
-                        (x, y),
-                        (x + w, y + h),
-                        (0, 0, 255),
-                        2,
-                    )
+            for b in motion_boxes:
+                cv2.rectangle(
+                    thresh_dilated,
+                    (int(b[0] / self.resize_factor), int(b[1] / self.resize_factor)),
+                    (int(b[2] / self.resize_factor), int(b[3] / self.resize_factor)),
+                    (0, 0, 255),
+                    2,
+                )
             cv2.imwrite(
                 f"debug/frames/improved-{self.frame_counter}.jpg", thresh_dilated
             )
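The core of the patch is the full-frame change gate added to `detect`. Below is a minimal standalone sketch of that logic under the same 1% settle heuristic; `LightningGate`, `frame_size`, and `filter` are illustrative names for this sketch, not Frigate's API:

```python
import cv2
import imutils


class LightningGate:
    """Discards motion boxes and recalibrates when most of the frame changes."""

    def __init__(self, frame_size, lightning_threshold=0.8):
        self.frame_size = frame_size  # (height, width) of the resized motion frame
        self.lightning_threshold = lightning_threshold
        self.calibrating = True  # assume uncalibrated until the scene settles

    def filter(self, thresh_dilated, motion_boxes):
        # measure how much of the frame the motion contours cover
        cnts = imutils.grab_contours(
            cv2.findContours(thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        )
        pct_motion = sum(cv2.contourArea(c) for c in cnts) / (
            self.frame_size[0] * self.frame_size[1]
        )

        # once motion covers less than 1% of the frame, the rolling average
        # has caught up with the scene and detection is considered calibrated
        if pct_motion < 0.01:
            self.calibrating = False

        # a near-full-frame change (lightning, IR cut-filter toggle, PTZ move)
        # is not real motion: drop the boxes and start recalibrating
        if self.calibrating or pct_motion > self.lightning_threshold:
            self.calibrating = True
            return []

        return motion_boxes
```

Gating on total contour area relative to frame area is what lets the default 0.8 separate the two cases: an exposure shift or IR toggle flips nearly every pixel at once, while even a person walking up to a doorbell camera rarely covers 80% of the frame.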