Separate object reduction to own function and reduce confidence of boxes on edge of region

Nick Mowen 2023-10-23 14:37:32 -06:00
parent e9376ca285
commit c4507901f3
2 changed files with 91 additions and 81 deletions
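The substantive change is the second half of the title: before non-maxima suppression runs, any detection whose box is clipped by the edge of its region has its confidence halved, so an unclipped detection of the same object is preferred. A minimal standalone illustration of that rule (the example boxes and the bare x_min edge test are invented for this sketch; only the halving and the 0.5/0.4 thresholds come from the diff below):

import cv2
import numpy as np

# two overlapping "person" boxes in a 300x300 region, as (x_min, y_min, x_max, y_max);
# the first starts at x_min = 0, i.e. it is cut off by the region border
detections = [
    ("person", 0.80, (0, 40, 120, 260)),
    ("person", 0.75, (30, 50, 150, 270)),
]

# xyxy -> xywh, the layout cv2.dnn.NMSBoxes expects
boxes = [(b[0], b[1], b[2] - b[0], b[3] - b[1]) for _, _, b in detections]

# halve the confidence of the edge-clipped box; the diff delegates this test
# to frigate.util.image.clipped, a bare x_min check stands in for it here
scores = [score / 2 if box[0] <= 0 else score for _, score, box in detections]

# same thresholds as the diff: score_threshold=0.5, nms_threshold=0.4
indices = np.array(cv2.dnn.NMSBoxes(boxes, scores, 0.5, 0.4)).flatten()
print([detections[int(i)][:2] for i in indices])
# [('person', 0.75)] -- the interior detection wins instead of the clipped one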


@@ -3,6 +3,7 @@
import datetime
import logging
import math
+from collections import defaultdict

import cv2
import numpy as np

@@ -15,6 +16,7 @@ from frigate.models import Event, Regions, Timeline
from frigate.util.image import (
    area,
    calculate_region,
+    clipped,
    intersection,
    intersection_over_union,
    yuv_region_2_bgr,
@@ -414,43 +416,6 @@ def get_cluster_region(frame_shape, min_region, cluster, boxes):
    )

-def get_consolidated_object_detections(detected_object_groups):
-    """Drop detections that overlap too much"""
-    consolidated_detections = []
-    for group in detected_object_groups.values():
-        # if the group only has 1 item, skip
-        if len(group) == 1:
-            consolidated_detections.append(group[0])
-            continue
-
-        # sort smallest to largest by area
-        sorted_by_area = sorted(group, key=lambda g: g[3])
-
-        for current_detection_idx in range(0, len(sorted_by_area)):
-            current_detection = sorted_by_area[current_detection_idx]
-            current_label = current_detection[0]
-            current_box = current_detection[2]
-            overlap = 0
-
-            for to_check_idx in range(
-                min(current_detection_idx + 1, len(sorted_by_area)),
-                len(sorted_by_area),
-            ):
-                to_check = sorted_by_area[to_check_idx][2]
-                intersect_box = intersection(current_box, to_check)
-                # if 90% of smaller detection is inside of another detection, consolidate
-                if intersect_box is not None and area(intersect_box) / area(
-                    current_box
-                ) > LABEL_CONSOLIDATION_MAP.get(
-                    current_label, LABEL_CONSOLIDATION_DEFAULT
-                ):
-                    overlap = 1
-                    break
-
-            if overlap == 0:
-                consolidated_detections.append(sorted_by_area[current_detection_idx])
-
-    return consolidated_detections

def get_startup_regions(
    frame_shape: tuple[int],
    region_min_size: int,

@@ -483,3 +448,90 @@ def get_startup_regions(
    )

    return regions

+def reduce_detections(
+    frame_shape: tuple[int],
+    all_detections: list[tuple[any]],
+) -> list[tuple[any]]:
+    """Take a list of detections and reduce overlaps to create a list of confident detections."""
+
+    def reduce_overlapping_detections(detections: list[tuple[any]]) -> list[tuple[any]]:
+        """apply non-maxima suppression to suppress weak, overlapping bounding boxes."""
+        detected_object_groups = defaultdict(lambda: [])
+        for detection in detections:
+            detected_object_groups[detection[0]].append(detection)
+
+        selected_objects = []
+        for group in detected_object_groups.values():
+            # o[2] is the box of the object: xmin, ymin, xmax, ymax
+            # apply max/min to ensure values do not exceed the known frame size
+            boxes = [
+                (
+                    o[2][0],
+                    o[2][1],
+                    o[2][2] - o[2][0],
+                    o[2][3] - o[2][1],
+                )
+                for o in group
+            ]
+
+            # reduce confidences for objects that are on edge of region
+            confidences = [o[1] / 2 if clipped(o, frame_shape) else o[1] for o in group]
+
+            idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
+
+            # add objects
+            for index in idxs:
+                index = index if isinstance(index, np.int32) else index[0]
+                obj = group[index]
+                selected_objects.append(obj)
+
+        # set the detections list to only include top objects
+        return selected_objects
+
+    def get_consolidated_object_detections(detections: list[tuple[any]]):
+        """Drop detections that overlap too much."""
+        detected_object_groups = defaultdict(lambda: [])
+        for detection in detections:
+            detected_object_groups[detection[0]].append(detection)
+
+        consolidated_detections = []
+        for group in detected_object_groups.values():
+            # if the group only has 1 item, skip
+            if len(group) == 1:
+                consolidated_detections.append(group[0])
+                continue
+
+            # sort smallest to largest by area
+            sorted_by_area = sorted(group, key=lambda g: g[3])
+
+            for current_detection_idx in range(0, len(sorted_by_area)):
+                current_detection = sorted_by_area[current_detection_idx]
+                current_label = current_detection[0]
+                current_box = current_detection[2]
+                overlap = 0
+
+                for to_check_idx in range(
+                    min(current_detection_idx + 1, len(sorted_by_area)),
+                    len(sorted_by_area),
+                ):
+                    to_check = sorted_by_area[to_check_idx][2]
+                    intersect_box = intersection(current_box, to_check)
+                    # if 90% of smaller detection is inside of another detection, consolidate
+                    if intersect_box is not None and area(intersect_box) / area(
+                        current_box
+                    ) > LABEL_CONSOLIDATION_MAP.get(
+                        current_label, LABEL_CONSOLIDATION_DEFAULT
+                    ):
+                        overlap = 1
+                        break
+
+                if overlap == 0:
+                    consolidated_detections.append(
+                        sorted_by_area[current_detection_idx]
+                    )
+
+        return consolidated_detections
+
+    return get_consolidated_object_detections(
+        reduce_overlapping_detections(all_detections)
+    )
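The second stage, the nested get_consolidated_object_detections, keeps only the largest of any same-label detections that mostly cover each other: per the comment above, a detection is dropped when roughly 90% of its area falls inside another detection of the same label (LABEL_CONSOLIDATION_MAP allows per-label overrides; the default value is not shown in this diff, so 0.9 is assumed here). A self-contained sketch of that containment test with local helpers:

def box_area(box):
    # box is (x_min, y_min, x_max, y_max)
    return (box[2] - box[0]) * (box[3] - box[1])

def box_intersection(a, b):
    x_min, y_min = max(a[0], b[0]), max(a[1], b[1])
    x_max, y_max = min(a[2], b[2]), min(a[3], b[3])
    if x_max <= x_min or y_max <= y_min:
        return None
    return (x_min, y_min, x_max, y_max)

CONSOLIDATION_THRESHOLD = 0.9  # assumed value of LABEL_CONSOLIDATION_DEFAULT

small = (110, 210, 190, 330)  # a "person" box
large = (100, 200, 220, 520)  # a bigger "person" box that contains all of it

overlap = box_intersection(small, large)
covered = box_area(overlap) / box_area(small) if overlap else 0.0
print(covered > CONSOLIDATION_THRESHOLD)  # True -> the smaller detection is dropped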


@@ -7,10 +7,8 @@ import signal
import subprocess as sp
import threading
import time
-from collections import defaultdict

import cv2
-import numpy as np
from setproctitle import setproctitle

from frigate.config import CameraConfig, DetectConfig, ModelConfig

@@ -39,12 +37,12 @@ from frigate.util.object import (
    get_cluster_candidates,
    get_cluster_region,
    get_cluster_region_from_grid,
-    get_consolidated_object_detections,
    get_min_region_size,
    get_startup_regions,
    inside_any,
    intersects_any,
    is_object_filtered,
+    reduce_detections,
)

from frigate.util.services import listen
@@ -688,50 +686,10 @@ def process_frames(
                    )
                )

-            #########
-            # merge objects
-            #########
-            # group by name
-            detected_object_groups = defaultdict(lambda: [])
-            for detection in detections:
-                detected_object_groups[detection[0]].append(detection)
-
-            selected_objects = []
-            for group in detected_object_groups.values():
-                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
-                # o[2] is the box of the object: xmin, ymin, xmax, ymax
-                # apply max/min to ensure values do not exceed the known frame size
-                boxes = [
-                    (
-                        o[2][0],
-                        o[2][1],
-                        o[2][2] - o[2][0],
-                        o[2][3] - o[2][1],
-                    )
-                    for o in group
-                ]
-                confidences = [o[1] for o in group]
-                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
-
-                # add objects
-                for index in idxs:
-                    index = index if isinstance(index, np.int32) else index[0]
-                    obj = group[index]
-                    selected_objects.append(obj)
-
-            # set the detections list to only include top objects
-            detections = selected_objects
+            consolidated_detections = reduce_detections(frame_shape, detections)

            # if detection was run on this frame, consolidate
            if len(regions) > 0:
-                # group by name
-                detected_object_groups = defaultdict(lambda: [])
-                for detection in detections:
-                    detected_object_groups[detection[0]].append(detection)
-                consolidated_detections = get_consolidated_object_detections(
-                    detected_object_groups
-                )
                tracked_detections = [
                    d
                    for d in consolidated_detections
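clipped itself lives in frigate.util.image and its definition is not part of this commit. For orientation only, the check is assumed to be along these lines: a detection counts as clipped when its bounding box runs into the border of the shape it was detected against (the helper name, margin, and tuple layout below are guesses, not Frigate's implementation):

def clipped_guess(detection, frame_shape, margin=2):
    """Hypothetical stand-in for frigate.util.image.clipped.

    Assumes detection[2] is the box (x_min, y_min, x_max, y_max)
    and frame_shape starts with (height, width).
    """
    height, width = frame_shape[0], frame_shape[1]
    x_min, y_min, x_max, y_max = detection[2]
    return (
        x_min <= margin
        or y_min <= margin
        or x_max >= width - margin
        or y_max >= height - margin
    )

# a box flush against the top edge of a 720x1280 frame is treated as clipped,
# so reduce_detections would halve its confidence before NMS
print(clipped_guess(("person", 0.8, (400, 0, 520, 180)), (720, 1280)))  # True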