Move track_camera into CameraTracker process

George Tsiamasiotis 2024-10-02 09:17:45 +03:00
parent abe4ce43f3
commit 4fc970dbdd
2 changed files with 392 additions and 434 deletions
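This replaces the free function track_camera, which was spawned with util.Process(target=..., args=(...)) and a ten-item argument tuple, with a CameraTracker subclass of util.Process that receives its dependencies in __init__ and runs the detection loop in run(). The signal handlers, setproctitle() call, and listen() setup disappear from the camera code, and the process title ("frigate.process:<camera>") now comes from the name passed to super().__init__(). Since run() uses self.stop_event and self.logger without defining them, the diff assumes util.Process supplies that per-process scaffolding. A minimal sketch of such a wrapper under those assumptions (this is illustrative, not Frigate's actual frigate.util.Process):

import functools
import logging
import multiprocessing as mp
import signal


class Process(mp.Process):
    """Illustrative stand-in for frigate.util.Process (assumed behavior)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # created in the parent and inherited by the child; run() loops poll it
        self.stop_event = mp.Event()

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # wrap the subclass's run() so per-process setup happens in the child
        inner = cls.run

        @functools.wraps(inner)
        def run(self):
            # SIGTERM/SIGINT only flip the stop event; loops exit cleanly
            signal.signal(signal.SIGTERM, lambda *_: self.stop_event.set())
            signal.signal(signal.SIGINT, lambda *_: self.stop_event.set())
            self.logger = logging.getLogger(self.name)
            inner(self)

        cls.run = run

With a wrapper shaped like this, a subclass only declares its arguments and its loop, which is the form CameraTracker takes below (hypothetical trimmed-down subclass):

class CameraTracker(Process):
    def __init__(self, camera_name: str):
        super().__init__(name=f"frigate.process:{camera_name}", daemon=True)
        self.camera_name = camera_name

    def run(self):
        while not self.stop_event.is_set():
            ...  # pull a frame, detect motion and objects, update the tracker
        self.logger.info(f"{self.name}: exiting subprocess")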


@@ -6,7 +6,7 @@ from typing import Optional
 from frigate import util
 from frigate.config import FrigateConfig
 from frigate.util.object import get_camera_regions_grid
-from frigate.video import CameraWatchdog, track_camera
+from frigate.video import CameraTracker, CameraWatchdog
 
 from .metrics import CameraMetrics, PTZMetrics
@@ -44,22 +44,15 @@ class Camera:
             logger.info(f"Camera processor not started for disabled camera {self.name}")
             return
 
-        camera_process = util.Process(
-            target=track_camera,
-            name=f"camera_processor:{self.name}",
-            args=(
-                self.name,
-                self.config.cameras[self.name],
-                self.config.model,
-                self.config.model.merged_labelmap,
-                detection_queue,
-                self.detection_out_event,
-                detected_frames_queue,
-                self.camera_metrics,
-                self.ptz_metrics,
-                self.region_grid,
-            ),
-            daemon=True,
+        camera_process = CameraTracker(
+            self.config.cameras[self.name],
+            self.config.model,
+            detection_queue,
+            self.detection_out_event,
+            detected_frames_queue,
+            self.camera_metrics,
+            self.ptz_metrics,
+            self.region_grid,
         )
         self.process = camera_process
         self.camera_metrics.process_pid.value = camera_process.pid or 0
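At the call site the lifecycle stays the same as with any multiprocessing.Process; under the sketch above, a hypothetical start/stop sequence would look like:

tracker = CameraTracker("front_door")
tracker.start()      # forks the child; run() executes there
...
tracker.terminate()  # delivers SIGTERM; the wrapper's handler sets stop_event
tracker.join()       # run() drains its queues and returns, child exits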


@@ -3,12 +3,11 @@ import logging
 import multiprocessing as mp
 import os
 import queue
-import signal
 import subprocess as sp
 import threading
+from multiprocessing.synchronize import Event
 
 import cv2
-from setproctitle import setproctitle
 
 from frigate import util
 from frigate.camera.metrics import CameraMetrics, PTZMetrics
@@ -21,15 +20,12 @@ from frigate.const import (
     REQUEST_REGION_GRID,
 )
 from frigate.log import LogPipe
-from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
 from frigate.object_detection import RemoteObjectDetector
 from frigate.ptz.autotrack import ptz_moving_at_frame_time
-from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
 from frigate.util.image import (
-    FrameManager,
     SharedMemoryFrameManager,
     draw_box_with_label,
 )
@@ -46,9 +42,6 @@ from frigate.util.object import (
     is_object_filtered,
     reduce_detections,
 )
-from frigate.util.services import listen
-
-logger = logging.getLogger(__name__)
 
 
 def stop_ffmpeg(ffmpeg_process, logger):
@@ -285,6 +278,7 @@ class CameraCapture(threading.Thread):
     ):
         super().__init__(name=f"capture:{config.name}")
+        self.logger = logging.getLogger(self.name)
         self.config = config
         self.shm_frame_count = shm_frame_count
         self.frame_shape = frame_shape
@@ -328,12 +322,12 @@ class CameraCapture(threading.Thread):
                 if self.stop_event.is_set():
                     break
 
-                logger.error(
+                self.logger.error(
                     f"{self.config.name}: Unable to read frames from ffmpeg process."
                 )
 
                 if self.ffmpeg_process.poll() is not None:
-                    logger.error(
+                    self.logger.error(
                         f"{self.config.name}: ffmpeg process is not running. exiting capture thread..."
                     )
                     break
@@ -356,442 +350,413 @@
             self.frame_manager.delete(frame)
 
 
-def track_camera(
-    name,
-    config: CameraConfig,
-    model_config,
-    labelmap,
-    detection_queue,
-    result_connection,
-    detected_objects_queue,
-    camera_metrics: CameraMetrics,
-    ptz_metrics: PTZMetrics,
-    region_grid,
-):
-    stop_event = mp.Event()
-
-    def receiveSignal(signalNumber, frame):
-        stop_event.set()
-
-    signal.signal(signal.SIGTERM, receiveSignal)
-    signal.signal(signal.SIGINT, receiveSignal)
-
-    threading.current_thread().name = f"process:{name}"
-    setproctitle(f"frigate.process:{name}")
-    listen()
-
-    frame_queue = camera_metrics.frame_queue
-
-    frame_shape = config.frame_shape
-    objects_to_track = config.objects.track
-    object_filters = config.objects.filters
-
-    motion_detector = ImprovedMotionDetector(
-        frame_shape, config.motion, config.detect.fps, name=config.name
-    )
-    object_detector = RemoteObjectDetector(
-        name, labelmap, detection_queue, result_connection, model_config, stop_event
-    )
-
-    object_tracker = NorfairTracker(config, ptz_metrics)
-
-    frame_manager = SharedMemoryFrameManager()
-
-    # create communication for region grid updates
-    requestor = InterProcessRequestor()
-
-    process_frames(
-        name,
-        requestor,
-        frame_queue,
-        frame_shape,
-        model_config,
-        config.detect,
-        frame_manager,
-        motion_detector,
-        object_detector,
-        object_tracker,
-        detected_objects_queue,
-        camera_metrics,
-        objects_to_track,
-        object_filters,
-        stop_event,
-        ptz_metrics,
-        region_grid,
-    )
-
-    # empty the frame queue
-    logger.info(f"{name}: emptying frame queue")
-    while not frame_queue.empty():
-        frame_time = frame_queue.get(False)
-        frame_manager.delete(f"{name}{frame_time}")
-
-    logger.info(f"{name}: exiting subprocess")
-
-
-def detect(
-    detect_config: DetectConfig,
-    object_detector,
-    frame,
-    model_config,
-    region,
-    objects_to_track,
-    object_filters,
-):
-    tensor_input = create_tensor_input(frame, model_config, region)
-
-    detections = []
-    region_detections = object_detector.detect(tensor_input)
-    for d in region_detections:
-        box = d[2]
-        size = region[2] - region[0]
-        x_min = int(max(0, (box[1] * size) + region[0]))
-        y_min = int(max(0, (box[0] * size) + region[1]))
-        x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
-        y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))
-
-        # ignore objects that were detected outside the frame
-        if (x_min >= detect_config.width - 1) or (y_min >= detect_config.height - 1):
-            continue
-
-        width = x_max - x_min
-        height = y_max - y_min
-        area = width * height
-        ratio = width / max(1, height)
-        det = (
-            d[0],
-            d[1],
-            (x_min, y_min, x_max, y_max),
-            area,
-            ratio,
-            region,
-        )
-        # apply object filters
-        if is_object_filtered(det, objects_to_track, object_filters):
-            continue
-        detections.append(det)
-    return detections
-
-
-def process_frames(
-    camera_name: str,
-    requestor: InterProcessRequestor,
-    frame_queue: mp.Queue,
-    frame_shape,
-    model_config: ModelConfig,
-    detect_config: DetectConfig,
-    frame_manager: FrameManager,
-    motion_detector: MotionDetector,
-    object_detector: RemoteObjectDetector,
-    object_tracker: ObjectTracker,
-    detected_objects_queue: mp.Queue,
-    camera_metrics: CameraMetrics,
-    objects_to_track: list[str],
-    object_filters,
-    stop_event,
-    ptz_metrics: PTZMetrics,
-    region_grid,
-    exit_on_empty: bool = False,
-):
-    next_region_update = get_tomorrow_at_time(2)
-    config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}")
-
-    fps_tracker = EventsPerSecond()
-    fps_tracker.start()
-
-    startup_scan = True
-    stationary_frame_counter = 0
-
-    region_min_size = get_min_region_size(model_config)
-
-    while not stop_event.is_set():
-        # check for updated detect config
-        _, updated_detect_config = config_subscriber.check_for_update()
-
-        if updated_detect_config:
-            detect_config = updated_detect_config
-
-        if (
-            datetime.datetime.now().astimezone(datetime.timezone.utc)
-            > next_region_update
-        ):
-            region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_name)
-            next_region_update = get_tomorrow_at_time(2)
-
-        try:
-            if exit_on_empty:
-                frame_time = frame_queue.get(False)
-            else:
-                frame_time = frame_queue.get(True, 1)
-        except queue.Empty:
-            if exit_on_empty:
-                logger.info("Exiting track_objects...")
-                break
-            continue
-
-        camera_metrics.detection_frame.value = frame_time
-        ptz_metrics.frame_time.value = frame_time
-
-        frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
-        )
-
-        if frame is None:
-            logger.debug(f"{camera_name}: frame {frame_time} is not in memory store.")
-            continue
-
-        # look for motion if enabled
-        motion_boxes = motion_detector.detect(frame)
-
-        regions = []
-        consolidated_detections = []
-
-        # if detection is disabled
-        if not detect_config.enabled:
-            object_tracker.match_and_update(frame_time, [])
-        else:
-            # get stationary object ids
-            # check every Nth frame for stationary objects
-            # disappeared objects are not stationary
-            # also check for overlapping motion boxes
-            if stationary_frame_counter == detect_config.stationary.interval:
-                stationary_frame_counter = 0
-                stationary_object_ids = []
-            else:
-                stationary_frame_counter += 1
-                stationary_object_ids = [
-                    obj["id"]
-                    for obj in object_tracker.tracked_objects.values()
-                    # if it has exceeded the stationary threshold
-                    if obj["motionless_count"] >= detect_config.stationary.threshold
-                    # and it hasn't disappeared
-                    and object_tracker.disappeared[obj["id"]] == 0
-                    # and it doesn't overlap with any current motion boxes when not calibrating
-                    and not intersects_any(
-                        obj["box"],
-                        [] if motion_detector.is_calibrating() else motion_boxes,
-                    )
-                ]
-
-            # get tracked object boxes that aren't stationary
-            tracked_object_boxes = [
-                (
-                    # use existing object box for stationary objects
-                    obj["estimate"]
-                    if obj["motionless_count"] < detect_config.stationary.threshold
-                    else obj["box"]
-                )
-                for obj in object_tracker.tracked_objects.values()
-                if obj["id"] not in stationary_object_ids
-            ]
-            object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes
-
-            # get consolidated regions for tracked objects
-            regions = [
-                get_cluster_region(
-                    frame_shape, region_min_size, candidate, object_boxes
-                )
-                for candidate in get_cluster_candidates(
-                    frame_shape, region_min_size, object_boxes
-                )
-            ]
-
-            # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking
-            # ptz_moving_at_frame_time() always returns False for non-autotracking cameras
-            if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time(
-                frame_time,
-                ptz_metrics.start_time.value,
-                ptz_metrics.stop_time.value,
-            ):
-                # find motion boxes that are not inside tracked object regions
-                standalone_motion_boxes = [
-                    b for b in motion_boxes if not inside_any(b, regions)
-                ]
-
-                if standalone_motion_boxes:
-                    motion_clusters = get_cluster_candidates(
-                        frame_shape,
-                        region_min_size,
-                        standalone_motion_boxes,
-                    )
-                    motion_regions = [
-                        get_cluster_region_from_grid(
-                            frame_shape,
-                            region_min_size,
-                            candidate,
-                            standalone_motion_boxes,
-                            region_grid,
-                        )
-                        for candidate in motion_clusters
-                    ]
-                    regions += motion_regions
-
-            # if starting up, get the next startup scan region
-            if startup_scan:
-                for region in get_startup_regions(
-                    frame_shape, region_min_size, region_grid
-                ):
-                    regions.append(region)
-                startup_scan = False
-
-            # resize regions and detect
-            # seed with stationary objects
-            detections = [
-                (
-                    obj["label"],
-                    obj["score"],
-                    obj["box"],
-                    obj["area"],
-                    obj["ratio"],
-                    obj["region"],
-                )
-                for obj in object_tracker.tracked_objects.values()
-                if obj["id"] in stationary_object_ids
-            ]
-
-            for region in regions:
-                detections.extend(
-                    detect(
-                        detect_config,
-                        object_detector,
-                        frame,
-                        model_config,
-                        region,
-                        objects_to_track,
-                        object_filters,
-                    )
-                )
-
-            consolidated_detections = reduce_detections(frame_shape, detections)
-
-            # if detection was run on this frame, consolidate
-            if len(regions) > 0:
-                tracked_detections = [
-                    d
-                    for d in consolidated_detections
-                    if d[0] not in model_config.all_attributes
-                ]
-                # now that we have refined our detections, we need to track objects
-                object_tracker.match_and_update(frame_time, tracked_detections)
-            # else, just update the frame times for the stationary objects
-            else:
-                object_tracker.update_frame_times(frame_time)
-
-        # group the attribute detections based on what label they apply to
-        attribute_detections = {}
-        for label, attribute_labels in model_config.attributes_map.items():
-            attribute_detections[label] = [
-                d for d in consolidated_detections if d[0] in attribute_labels
-            ]
-
-        # build detections and add attributes
-        detections = {}
-        for obj in object_tracker.tracked_objects.values():
-            attributes = []
-            # if the objects label has associated attribute detections
-            if obj["label"] in attribute_detections.keys():
-                # add them to attributes if they intersect
-                for attribute_detection in attribute_detections[obj["label"]]:
-                    if box_inside(obj["box"], (attribute_detection[2])):
-                        attributes.append(
-                            {
-                                "label": attribute_detection[0],
-                                "score": attribute_detection[1],
-                                "box": attribute_detection[2],
-                            }
-                        )
-            detections[obj["id"]] = {**obj, "attributes": attributes}
-
-        # debug object tracking
-        if False:
-            bgr_frame = cv2.cvtColor(
-                frame,
-                cv2.COLOR_YUV2BGR_I420,
-            )
-            object_tracker.debug_draw(bgr_frame, frame_time)
-            cv2.imwrite(
-                f"debug/frames/track-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame
-            )
-        # debug
-        if False:
-            bgr_frame = cv2.cvtColor(
-                frame,
-                cv2.COLOR_YUV2BGR_I420,
-            )
-
-            for m_box in motion_boxes:
-                cv2.rectangle(
-                    bgr_frame,
-                    (m_box[0], m_box[1]),
-                    (m_box[2], m_box[3]),
-                    (0, 0, 255),
-                    2,
-                )
-
-            for b in tracked_object_boxes:
-                cv2.rectangle(
-                    bgr_frame,
-                    (b[0], b[1]),
-                    (b[2], b[3]),
-                    (255, 0, 0),
-                    2,
-                )
-
-            for obj in object_tracker.tracked_objects.values():
-                if obj["frame_time"] == frame_time:
-                    thickness = 2
-                    color = model_config.colormap[obj["label"]]
-                else:
-                    thickness = 1
-                    color = (255, 0, 0)
-
-                # draw the bounding boxes on the frame
-                box = obj["box"]
-
-                draw_box_with_label(
-                    bgr_frame,
-                    box[0],
-                    box[1],
-                    box[2],
-                    box[3],
-                    obj["label"],
-                    obj["id"],
-                    thickness=thickness,
-                    color=color,
-                )
-
-            for region in regions:
-                cv2.rectangle(
-                    bgr_frame,
-                    (region[0], region[1]),
-                    (region[2], region[3]),
-                    (0, 255, 0),
-                    2,
-                )
-
-            cv2.imwrite(
-                f"debug/frames/{camera_name}-{'{:.6f}'.format(frame_time)}.jpg",
-                bgr_frame,
-            )
-        # add to the queue if not full
-        if detected_objects_queue.full():
-            frame_manager.delete(f"{camera_name}{frame_time}")
-            continue
-        else:
-            fps_tracker.update()
-            camera_metrics.process_fps.value = fps_tracker.eps()
-            detected_objects_queue.put(
-                (
-                    camera_name,
-                    frame_time,
-                    detections,
-                    motion_boxes,
-                    regions,
-                )
-            )
-            camera_metrics.detection_fps.value = object_detector.fps.eps()
-            frame_manager.close(f"{camera_name}{frame_time}")
-
-    motion_detector.stop()
-    requestor.stop()
-    config_subscriber.stop()
+class CameraTracker(util.Process):
+    def __init__(
+        self,
+        camera_config: CameraConfig,
+        model_config: ModelConfig,
+        detection_queue: mp.Queue,
+        result_connection: Event,
+        detected_objects_queue: mp.Queue,
+        camera_metrics: CameraMetrics,
+        ptz_metrics: PTZMetrics,
+        region_grid: list[list[dict[str, int]]],
+    ):
+        super().__init__(name=f"frigate.process:{camera_config.name}", daemon=True)
+        self.camera_config = camera_config
+        self.model_config = model_config
+        self.labelmap = model_config.merged_labelmap
+        self.detection_queue = detection_queue
+        self.result_connection = result_connection
+        self.detected_objects_queue = detected_objects_queue
+        self.camera_metrics = camera_metrics
+        self.ptz_metrics = ptz_metrics
+        self.region_grid = region_grid
+
+    def run(self):
+        frame_shape = self.camera_config.frame_shape
+
+        motion_detector = ImprovedMotionDetector(
+            frame_shape,
+            self.camera_config.motion,
+            self.camera_config.detect.fps,
+            name=self.camera_config.name,
+        )
+        object_detector = RemoteObjectDetector(
+            self.camera_config.name,
+            self.labelmap,
+            self.detection_queue,
+            self.result_connection,
+            self.model_config,
+            self.stop_event,
+        )
+        object_tracker = NorfairTracker(self.camera_config, self.ptz_metrics)
+
+        frame_manager = SharedMemoryFrameManager()
+
+        # create communication for region grid updates
+        requestor = InterProcessRequestor()
+
+        detect_config = self.camera_config.detect
+        next_region_update = get_tomorrow_at_time(2)
+        config_subscriber = ConfigSubscriber(f"config/detect/{self.camera_config.name}")
+
+        fps_tracker = EventsPerSecond()
+        fps_tracker.start()
+
+        startup_scan = True
+        stationary_frame_counter = 0
+
+        region_min_size = get_min_region_size(self.model_config)
+
+        while not self.stop_event.is_set():
+            # check for updated detect config
+            _, updated_detect_config = config_subscriber.check_for_update()
+
+            if updated_detect_config:
+                detect_config = updated_detect_config
+
+            if (
+                datetime.datetime.now().astimezone(datetime.timezone.utc)
+                > next_region_update
+            ):
+                self.region_grid = requestor.send_data(
+                    REQUEST_REGION_GRID, self.camera_config.name
+                )
+                next_region_update = get_tomorrow_at_time(2)
+
+            try:
+                frame_time = self.camera_metrics.frame_queue.get(timeout=1)
+            except queue.Empty:
+                continue
+
+            self.camera_metrics.detection_frame.value = frame_time
+            self.ptz_metrics.frame_time.value = frame_time
+
+            frame = frame_manager.get(
+                f"{self.camera_config.name}{frame_time}",
+                (frame_shape[0] * 3 // 2, frame_shape[1]),
+            )
+
+            if frame is None:
+                self.logger.debug(
+                    f"{self.camera_config.name}: frame {frame_time} is not in memory store."
+                )
+                continue
+
+            # look for motion if enabled
+            motion_boxes = motion_detector.detect(frame)
+
+            regions = []
+            consolidated_detections = []
+
+            # if detection is disabled
+            if not detect_config.enabled:
+                object_tracker.match_and_update(frame_time, [])
+            else:
+                # get stationary object ids
+                # check every Nth frame for stationary objects
+                # disappeared objects are not stationary
+                # also check for overlapping motion boxes
+                if stationary_frame_counter == detect_config.stationary.interval:
+                    stationary_frame_counter = 0
+                    stationary_object_ids = []
+                else:
+                    stationary_frame_counter += 1
+                    stationary_object_ids = [
+                        obj["id"]
+                        for obj in object_tracker.tracked_objects.values()
+                        # if it has exceeded the stationary threshold
+                        if obj["motionless_count"] >= detect_config.stationary.threshold
+                        # and it hasn't disappeared
+                        and object_tracker.disappeared[obj["id"]] == 0
+                        # and it doesn't overlap with any current motion boxes when not calibrating
+                        and not intersects_any(
+                            obj["box"],
+                            [] if motion_detector.is_calibrating() else motion_boxes,
+                        )
+                    ]
+
+                # get tracked object boxes that aren't stationary
+                tracked_object_boxes = [
+                    (
+                        # use existing object box for stationary objects
+                        obj["estimate"]
+                        if obj["motionless_count"] < detect_config.stationary.threshold
+                        else obj["box"]
+                    )
+                    for obj in object_tracker.tracked_objects.values()
+                    if obj["id"] not in stationary_object_ids
+                ]
+                object_boxes = (
+                    tracked_object_boxes + object_tracker.untracked_object_boxes
+                )
+
+                # get consolidated regions for tracked objects
+                regions = [
+                    get_cluster_region(
+                        frame_shape, region_min_size, candidate, object_boxes
+                    )
+                    for candidate in get_cluster_candidates(
+                        frame_shape, region_min_size, object_boxes
+                    )
+                ]
+
+                # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking
+                # ptz_moving_at_frame_time() always returns False for non-autotracking cameras
+                if (
+                    not motion_detector.is_calibrating()
+                    and not ptz_moving_at_frame_time(
+                        frame_time,
+                        self.ptz_metrics.start_time.value,
+                        self.ptz_metrics.stop_time.value,
+                    )
+                ):
+                    # find motion boxes that are not inside tracked object regions
+                    standalone_motion_boxes = [
+                        b for b in motion_boxes if not inside_any(b, regions)
+                    ]
+
+                    if standalone_motion_boxes:
+                        motion_clusters = get_cluster_candidates(
+                            frame_shape,
+                            region_min_size,
+                            standalone_motion_boxes,
+                        )
+                        motion_regions = [
+                            get_cluster_region_from_grid(
+                                frame_shape,
+                                region_min_size,
+                                candidate,
+                                standalone_motion_boxes,
+                                self.region_grid,
+                            )
+                            for candidate in motion_clusters
+                        ]
+                        regions += motion_regions
+
+                # if starting up, get the next startup scan region
+                if startup_scan:
+                    for region in get_startup_regions(
+                        frame_shape, region_min_size, self.region_grid
+                    ):
+                        regions.append(region)
+                    startup_scan = False
+
+                # resize regions and detect
+                # seed with stationary objects
+                detections = [
+                    (
+                        obj["label"],
+                        obj["score"],
+                        obj["box"],
+                        obj["area"],
+                        obj["ratio"],
+                        obj["region"],
+                    )
+                    for obj in object_tracker.tracked_objects.values()
+                    if obj["id"] in stationary_object_ids
+                ]
+
+                for region in regions:
+                    detections.extend(
+                        self.detect(
+                            detect_config,
+                            object_detector,
+                            frame,
+                            self.model_config,
+                            region,
+                        )
+                    )
+
+                consolidated_detections = reduce_detections(frame_shape, detections)
+
+                # if detection was run on this frame, consolidate
+                if len(regions) > 0:
+                    tracked_detections = [
+                        d
+                        for d in consolidated_detections
+                        if d[0] not in self.model_config.all_attributes
+                    ]
+                    # now that we have refined our detections, we need to track objects
+                    object_tracker.match_and_update(frame_time, tracked_detections)
+                # else, just update the frame times for the stationary objects
+                else:
+                    object_tracker.update_frame_times(frame_time)
+
+            # group the attribute detections based on what label they apply to
+            attribute_detections = {}
+            for label, attribute_labels in self.model_config.attributes_map.items():
+                attribute_detections[label] = [
+                    d for d in consolidated_detections if d[0] in attribute_labels
+                ]
+
+            # build detections and add attributes
+            detections = {}
+            for obj in object_tracker.tracked_objects.values():
+                attributes = []
+                # if the objects label has associated attribute detections
+                if obj["label"] in attribute_detections.keys():
+                    # add them to attributes if they intersect
+                    for attribute_detection in attribute_detections[obj["label"]]:
+                        if box_inside(obj["box"], (attribute_detection[2])):
+                            attributes.append(
+                                {
+                                    "label": attribute_detection[0],
+                                    "score": attribute_detection[1],
+                                    "box": attribute_detection[2],
+                                }
+                            )
+                detections[obj["id"]] = {**obj, "attributes": attributes}
+
+            # debug object tracking
+            if False:
+                bgr_frame = cv2.cvtColor(
+                    frame,
+                    cv2.COLOR_YUV2BGR_I420,
+                )
+                object_tracker.debug_draw(bgr_frame, frame_time)
+                cv2.imwrite(
+                    f"debug/frames/track-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame
+                )
+            # debug
+            if False:
+                bgr_frame = cv2.cvtColor(
+                    frame,
+                    cv2.COLOR_YUV2BGR_I420,
+                )
+
+                for m_box in motion_boxes:
+                    cv2.rectangle(
+                        bgr_frame,
+                        (m_box[0], m_box[1]),
+                        (m_box[2], m_box[3]),
+                        (0, 0, 255),
+                        2,
+                    )
+
+                for b in tracked_object_boxes:
+                    cv2.rectangle(
+                        bgr_frame,
+                        (b[0], b[1]),
+                        (b[2], b[3]),
+                        (255, 0, 0),
+                        2,
+                    )
+
+                for obj in object_tracker.tracked_objects.values():
+                    if obj["frame_time"] == frame_time:
+                        thickness = 2
+                        color = self.model_config.colormap[obj["label"]]
+                    else:
+                        thickness = 1
+                        color = (255, 0, 0)
+
+                    # draw the bounding boxes on the frame
+                    box = obj["box"]
+
+                    draw_box_with_label(
+                        bgr_frame,
+                        box[0],
+                        box[1],
+                        box[2],
+                        box[3],
+                        obj["label"],
+                        obj["id"],
+                        thickness=thickness,
+                        color=color,
+                    )
+
+                for region in regions:
+                    cv2.rectangle(
+                        bgr_frame,
+                        (region[0], region[1]),
+                        (region[2], region[3]),
+                        (0, 255, 0),
+                        2,
+                    )
+
+                cv2.imwrite(
+                    f"debug/frames/{self.camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg",
+                    bgr_frame,
+                )
+            # add to the queue if not full
+            if self.detected_objects_queue.full():
+                frame_manager.delete(f"{self.camera_config.name}{frame_time}")
+                continue
+            else:
+                fps_tracker.update()
+                self.camera_metrics.process_fps.value = fps_tracker.eps()
+                self.detected_objects_queue.put(
+                    (
+                        self.camera_config.name,
+                        frame_time,
+                        detections,
+                        motion_boxes,
+                        regions,
+                    )
+                )
+                self.camera_metrics.detection_fps.value = object_detector.fps.eps()
+                frame_manager.close(f"{self.camera_config.name}{frame_time}")
+
+        motion_detector.stop()
+        requestor.stop()
+        config_subscriber.stop()
+
+        # empty the frame queue
+        self.logger.info(f"{self.name}: emptying frame queue")
+        frame_queue = self.camera_metrics.frame_queue
+        while not frame_queue.empty():
+            frame_time = frame_queue.get(False)
+            frame_manager.delete(f"{self.name}{frame_time}")
+
+        self.logger.info(f"{self.name}: exiting subprocess")
+
+    def detect(
+        self,
+        detect_config: DetectConfig,
+        object_detector,
+        frame,
+        model_config,
+        region,
+    ):
+        tensor_input = create_tensor_input(frame, model_config, region)
+
+        detections = []
+        region_detections = object_detector.detect(tensor_input)
+        for d in region_detections:
+            box = d[2]
+            size = region[2] - region[0]
+            x_min = int(max(0, (box[1] * size) + region[0]))
+            y_min = int(max(0, (box[0] * size) + region[1]))
+            x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
+            y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))
+
+            # ignore objects that were detected outside the frame
+            if (x_min >= detect_config.width - 1) or (
+                y_min >= detect_config.height - 1
+            ):
+                continue
+
+            width = x_max - x_min
+            height = y_max - y_min
+            area = width * height
+            ratio = width / max(1, height)
+            det = (
+                d[0],
+                d[1],
+                (x_min, y_min, x_max, y_max),
+                area,
+                ratio,
+                region,
+            )
+            # apply object filters
+            if is_object_filtered(
+                det,
+                self.camera_config.objects.track,
+                self.camera_config.objects.filters,
+            ):
+                continue
+            detections.append(det)
+        return detections