Formatting

Nick Mowen 2023-10-16 16:14:55 -06:00
parent 6b49b81ba4
commit b72d9a49b2
2 changed files with 101 additions and 91 deletions

View File

@@ -2,11 +2,18 @@
 import logging
-import numpy
+import cv2
+import numpy as np

-from frigate.config import CameraConfig
+from frigate.config import CameraConfig, ModelConfig
+from frigate.detectors.detector_config import PixelFormatEnum
 from frigate.models import Timeline
-from frigate.util.image import calculate_region
+from frigate.util.image import (
+    calculate_region,
+    yuv_region_2_bgr,
+    yuv_region_2_rgb,
+    yuv_region_2_yuv,
+)

 logger = logging.getLogger(__name__)
@@ -69,7 +76,9 @@ def get_camera_regions_grid(
                 1.35,
             )
             # save width of region to grid as relative
-            grid[x_pos][y_pos]["sizes"].append((calculated_region[2] - calculated_region[0]) / width)
+            grid[x_pos][y_pos]["sizes"].append(
+                (calculated_region[2] - calculated_region[0]) / width
+            )

     for x in range(grid_size):
         for y in range(grid_size):
@@ -81,8 +90,8 @@ def get_camera_regions_grid(
            if len(cell["sizes"]) == 0:
                continue

-            std_dev = numpy.std(cell["sizes"])
-            mean = numpy.mean(cell["sizes"])
+            std_dev = np.std(cell["sizes"])
+            mean = np.mean(cell["sizes"])
            logger.debug(f"std dev: {std_dev} mean: {mean}")
            cell["std_dev"] = std_dev
            cell["mean"] = mean
@@ -143,3 +152,82 @@ def get_region_from_grid(
         min_region,
     )
     return new
+
+
+def filtered(obj, objects_to_track, object_filters):
+    object_name = obj[0]
+    object_score = obj[1]
+    object_box = obj[2]
+    object_area = obj[3]
+    object_ratio = obj[4]
+
+    if object_name not in objects_to_track:
+        return True
+
+    if object_name in object_filters:
+        obj_settings = object_filters[object_name]
+
+        # if the min area is larger than the
+        # detected object, don't add it to detected objects
+        if obj_settings.min_area > object_area:
+            return True
+
+        # if the detected object is larger than the
+        # max area, don't add it to detected objects
+        if obj_settings.max_area < object_area:
+            return True
+
+        # if the score is lower than the min_score, skip
+        if obj_settings.min_score > object_score:
+            return True
+
+        # if the object is not proportionally wide enough
+        if obj_settings.min_ratio > object_ratio:
+            return True
+
+        # if the object is proportionally too wide
+        if obj_settings.max_ratio < object_ratio:
+            return True
+
+        if obj_settings.mask is not None:
+            # compute the coordinates of the object and make sure
+            # the location isn't outside the bounds of the image (can happen from rounding)
+            object_xmin = object_box[0]
+            object_xmax = object_box[2]
+            object_ymax = object_box[3]
+            y_location = min(int(object_ymax), len(obj_settings.mask) - 1)
+            x_location = min(
+                int((object_xmax + object_xmin) / 2.0),
+                len(obj_settings.mask[0]) - 1,
+            )
+
+            # if the object is in a masked location, don't add it to detected objects
+            if obj_settings.mask[y_location][x_location] == 0:
+                return True
+
+    return False
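As a reference for the data the relocated filtered() helper consumes: each detection is a (label, score, box, area, ratio) tuple, and each per-label settings object exposes min_area, max_area, min_score, min_ratio, max_ratio and an optional mask. A minimal sketch of a call follows; the SimpleNamespace settings, the concrete numbers, and the frigate.util.object import path are assumptions for illustration.

from types import SimpleNamespace

import numpy as np

from frigate.util.object import filtered  # assumed destination module of the move

# hypothetical per-label filter settings mirroring the attributes filtered() reads
person_filter = SimpleNamespace(
    min_area=500,
    max_area=100000,
    min_score=0.6,
    min_ratio=0.5,
    max_ratio=2.0,
    mask=np.ones((480, 640), dtype=np.uint8),  # 1 = keep, 0 = masked out
)

# (label, score, box, area, ratio) for a hypothetical 80x200 px detection
detection = ("person", 0.82, (100, 200, 180, 400), 16000, 0.4)

# ratio 0.4 is below min_ratio 0.5, so the detection is filtered out
print(filtered(detection, ["person"], {"person": person_filter}))  # True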
+
+
+def get_min_region_size(model_config: ModelConfig) -> int:
+    """Get the min region size."""
+    return max(model_config.height, model_config.width)
+
+
+def create_tensor_input(frame, model_config: ModelConfig, region):
+    if model_config.input_pixel_format == PixelFormatEnum.rgb:
+        cropped_frame = yuv_region_2_rgb(frame, region)
+    elif model_config.input_pixel_format == PixelFormatEnum.bgr:
+        cropped_frame = yuv_region_2_bgr(frame, region)
+    else:
+        cropped_frame = yuv_region_2_yuv(frame, region)
+
+    # Resize if needed
+    if cropped_frame.shape != (model_config.height, model_config.width, 3):
+        cropped_frame = cv2.resize(
+            cropped_frame,
+            dsize=(model_config.width, model_config.height),
+            interpolation=cv2.INTER_LINEAR,
+        )
+
+    # Expand dimensions since the model expects images to have shape: [1, height, width, 3]
+    return np.expand_dims(cropped_frame, axis=0)
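The crop, resize, expand_dims pattern used by create_tensor_input can be checked in isolation. A minimal sketch with a synthetic BGR frame; the 320x320 model size, the region coordinates, and skipping frigate's YUV conversion helpers are all simplifications for illustration.

import cv2
import numpy as np

model_h, model_w = 320, 320  # assumed model input size
frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # synthetic BGR frame

x_min, y_min, x_max, y_max = 100, 100, 500, 500  # assumed region
cropped = frame[y_min:y_max, x_min:x_max]

# resize only when the crop does not already match the model input
if cropped.shape != (model_h, model_w, 3):
    cropped = cv2.resize(
        cropped, dsize=(model_w, model_h), interpolation=cv2.INTER_LINEAR
    )

# add the batch dimension the detector expects: [1, height, width, 3]
tensor = np.expand_dims(cropped, axis=0)
print(tensor.shape)  # (1, 320, 320, 3)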

View File

@@ -16,7 +16,6 @@ from setproctitle import setproctitle
 from frigate.config import CameraConfig, DetectConfig, ModelConfig
 from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
-from frigate.detectors.detector_config import PixelFormatEnum
 from frigate.log import LogPipe
 from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
@@ -33,9 +32,6 @@ from frigate.util.image import (
     draw_box_with_label,
     intersection,
     intersection_over_union,
-    yuv_region_2_bgr,
-    yuv_region_2_rgb,
-    yuv_region_2_yuv,
 )
 from frigate.util.object import get_cluster_region_from_grid
 from frigate.util.services import listen
@@ -43,85 +39,6 @@ from frigate.util.services import listen
 logger = logging.getLogger(__name__)

-
-def filtered(obj, objects_to_track, object_filters):
-    object_name = obj[0]
-    object_score = obj[1]
-    object_box = obj[2]
-    object_area = obj[3]
-    object_ratio = obj[4]
-
-    if object_name not in objects_to_track:
-        return True
-
-    if object_name in object_filters:
-        obj_settings = object_filters[object_name]
-
-        # if the min area is larger than the
-        # detected object, don't add it to detected objects
-        if obj_settings.min_area > object_area:
-            return True
-
-        # if the detected object is larger than the
-        # max area, don't add it to detected objects
-        if obj_settings.max_area < object_area:
-            return True
-
-        # if the score is lower than the min_score, skip
-        if obj_settings.min_score > object_score:
-            return True
-
-        # if the object is not proportionally wide enough
-        if obj_settings.min_ratio > object_ratio:
-            return True
-
-        # if the object is proportionally too wide
-        if obj_settings.max_ratio < object_ratio:
-            return True
-
-        if obj_settings.mask is not None:
-            # compute the coordinates of the object and make sure
-            # the location isn't outside the bounds of the image (can happen from rounding)
-            object_xmin = object_box[0]
-            object_xmax = object_box[2]
-            object_ymax = object_box[3]
-            y_location = min(int(object_ymax), len(obj_settings.mask) - 1)
-            x_location = min(
-                int((object_xmax + object_xmin) / 2.0),
-                len(obj_settings.mask[0]) - 1,
-            )
-
-            # if the object is in a masked location, don't add it to detected objects
-            if obj_settings.mask[y_location][x_location] == 0:
-                return True
-
-    return False
-
-
-def get_min_region_size(model_config: ModelConfig) -> int:
-    """Get the min region size."""
-    return max(model_config.height, model_config.width)
-
-
-def create_tensor_input(frame, model_config: ModelConfig, region):
-    if model_config.input_pixel_format == PixelFormatEnum.rgb:
-        cropped_frame = yuv_region_2_rgb(frame, region)
-    elif model_config.input_pixel_format == PixelFormatEnum.bgr:
-        cropped_frame = yuv_region_2_bgr(frame, region)
-    else:
-        cropped_frame = yuv_region_2_yuv(frame, region)
-
-    # Resize if needed
-    if cropped_frame.shape != (model_config.height, model_config.width, 3):
-        cropped_frame = cv2.resize(
-            cropped_frame,
-            dsize=(model_config.width, model_config.height),
-            interpolation=cv2.INTER_LINEAR,
-        )
-
-    # Expand dimensions since the model expects images to have shape: [1, height, width, 3]
-    return np.expand_dims(cropped_frame, axis=0)
-

 def stop_ffmpeg(ffmpeg_process, logger):
     logger.info("Terminating the existing ffmpeg process...")
     ffmpeg_process.terminate()
@@ -842,11 +759,16 @@ def process_frames(
         # only add in the motion boxes when not calibrating
         if not motion_detector.is_calibrating():
             # find motion boxes that are not inside tracked object regions
-            standalone_motion_boxes = [b for b in motion_boxes if not inside_any(b, regions)]
+            standalone_motion_boxes = [
+                b for b in motion_boxes if not inside_any(b, regions)
+            ]

             if standalone_motion_boxes:
                 motion_clusters = get_cluster_candidates(
-                    frame_shape, region_min_size, standalone_motion_boxes, region_grid
+                    frame_shape,
+                    region_min_size,
+                    standalone_motion_boxes,
+                    region_grid,
                 )
                 motion_regions = [
                     get_cluster_region_from_grid(