Move track_camera into CameraTracker process

George Tsiamasiotis 2024-10-02 09:17:45 +03:00
parent abe4ce43f3
commit 4fc970dbdd
2 changed files with 392 additions and 434 deletions
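The new CameraTracker subclasses frigate.util.Process, which is not shown in this diff but evidently takes over the per-process boilerplate that track_camera used to set up by hand: a stop_event, SIGTERM/SIGINT handlers, the process title, and a per-process logger (the signal, setproctitle, and listen imports are dropped below for the same reason). A minimal sketch of what such a base class would have to provide for this commit to work; the implementation details here are assumptions, not the actual frigate code:

# Hypothetical sketch of frigate.util.Process -- not part of this commit.
# Assumes the "fork" start method, so the wrapped run() closure below is
# inherited by the child process.
import functools
import logging
import multiprocessing as mp
import signal

from setproctitle import setproctitle


class Process(mp.Process):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stop_event = mp.Event()
        self.logger = logging.getLogger(self.name)

        # Wrap the subclass's run() so the process title and the signal
        # handlers are installed in the child before the real work starts,
        # mirroring what track_camera previously did inline.
        sub_run = self.run

        @functools.wraps(sub_run)
        def run_wrapper():
            setproctitle(self.name)

            def on_signal(signum, frame):
                self.stop_event.set()

            signal.signal(signal.SIGTERM, on_signal)
            signal.signal(signal.SIGINT, on_signal)
            sub_run()

        self.run = run_wrapper

With a base class along these lines, the name=f"frigate.process:{camera_config.name}" passed to super().__init__() replaces the old setproctitle call, and self.stop_event replaces the hand-rolled mp.Event() and receiveSignal handler.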


@@ -6,7 +6,7 @@ from typing import Optional
 from frigate import util
 from frigate.config import FrigateConfig
 from frigate.util.object import get_camera_regions_grid
-from frigate.video import CameraWatchdog, track_camera
+from frigate.video import CameraTracker, CameraWatchdog
 
 from .metrics import CameraMetrics, PTZMetrics
@@ -44,22 +44,15 @@ class Camera:
             logger.info(f"Camera processor not started for disabled camera {self.name}")
             return
 
-        camera_process = util.Process(
-            target=track_camera,
-            name=f"camera_processor:{self.name}",
-            args=(
-                self.name,
+        camera_process = CameraTracker(
             self.config.cameras[self.name],
             self.config.model,
-                self.config.model.merged_labelmap,
             detection_queue,
             self.detection_out_event,
             detected_frames_queue,
             self.camera_metrics,
             self.ptz_metrics,
             self.region_grid,
-            ),
-            daemon=True,
         )
         self.process = camera_process
         self.camera_metrics.process_pid.value = camera_process.pid or 0


@@ -3,12 +3,11 @@ import logging
 import multiprocessing as mp
 import os
 import queue
-import signal
 import subprocess as sp
 import threading
+from multiprocessing.synchronize import Event
 
 import cv2
-from setproctitle import setproctitle
 
 from frigate import util
 from frigate.camera.metrics import CameraMetrics, PTZMetrics
@@ -21,15 +20,12 @@ from frigate.const import (
     REQUEST_REGION_GRID,
 )
 from frigate.log import LogPipe
-from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
 from frigate.object_detection import RemoteObjectDetector
 from frigate.ptz.autotrack import ptz_moving_at_frame_time
-from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
 from frigate.util.image import (
-    FrameManager,
     SharedMemoryFrameManager,
     draw_box_with_label,
 )
@@ -46,9 +42,6 @@ from frigate.util.object import (
     is_object_filtered,
     reduce_detections,
 )
-from frigate.util.services import listen
-
-logger = logging.getLogger(__name__)
 
 
 def stop_ffmpeg(ffmpeg_process, logger):
@@ -285,6 +278,7 @@ class CameraCapture(threading.Thread):
     ):
         super().__init__(name=f"capture:{config.name}")
+        self.logger = logging.getLogger(self.name)
         self.config = config
         self.shm_frame_count = shm_frame_count
         self.frame_shape = frame_shape
@@ -328,12 +322,12 @@ class CameraCapture(threading.Thread):
                 if self.stop_event.is_set():
                     break
 
-                logger.error(
+                self.logger.error(
                     f"{self.config.name}: Unable to read frames from ffmpeg process."
                 )
 
                 if self.ffmpeg_process.poll() is not None:
-                    logger.error(
+                    self.logger.error(
                         f"{self.config.name}: ffmpeg process is not running. exiting capture thread..."
                     )
                     break
@@ -356,145 +350,57 @@ class CameraCapture(threading.Thread):
                 self.frame_manager.delete(frame)
 
 
-def track_camera(
-    name,
-    config: CameraConfig,
-    model_config,
-    labelmap,
-    detection_queue,
-    result_connection,
-    detected_objects_queue,
+class CameraTracker(util.Process):
+    def __init__(
+        self,
+        camera_config: CameraConfig,
+        model_config: ModelConfig,
+        detection_queue: mp.Queue,
+        result_connection: Event,
+        detected_objects_queue: mp.Queue,
         camera_metrics: CameraMetrics,
         ptz_metrics: PTZMetrics,
-    region_grid,
+        region_grid: list[list[dict[str, int]]],
     ):
-    stop_event = mp.Event()
-
-    def receiveSignal(signalNumber, frame):
-        stop_event.set()
-
-    signal.signal(signal.SIGTERM, receiveSignal)
-    signal.signal(signal.SIGINT, receiveSignal)
-
-    threading.current_thread().name = f"process:{name}"
-    setproctitle(f"frigate.process:{name}")
-    listen()
-
-    frame_queue = camera_metrics.frame_queue
-
-    frame_shape = config.frame_shape
-    objects_to_track = config.objects.track
-    object_filters = config.objects.filters
+        super().__init__(name=f"frigate.process:{camera_config.name}", daemon=True)
+        self.camera_config = camera_config
+        self.model_config = model_config
+        self.labelmap = model_config.merged_labelmap
+        self.detection_queue = detection_queue
+        self.result_connection = result_connection
+        self.detected_objects_queue = detected_objects_queue
+        self.camera_metrics = camera_metrics
+        self.ptz_metrics = ptz_metrics
+        self.region_grid = region_grid
+
+    def run(self):
+        frame_shape = self.camera_config.frame_shape
 
         motion_detector = ImprovedMotionDetector(
-        frame_shape, config.motion, config.detect.fps, name=config.name
+            frame_shape,
+            self.camera_config.motion,
+            self.camera_config.detect.fps,
+            name=self.camera_config.name,
         )
         object_detector = RemoteObjectDetector(
-        name, labelmap, detection_queue, result_connection, model_config, stop_event
+            self.camera_config.name,
+            self.labelmap,
+            self.detection_queue,
+            self.result_connection,
+            self.model_config,
+            self.stop_event,
         )
 
-    object_tracker = NorfairTracker(config, ptz_metrics)
+        object_tracker = NorfairTracker(self.camera_config, self.ptz_metrics)
 
         frame_manager = SharedMemoryFrameManager()
 
         # create communication for region grid updates
         requestor = InterProcessRequestor()
 
-    process_frames(
-        name,
-        requestor,
-        frame_queue,
-        frame_shape,
-        model_config,
-        config.detect,
-        frame_manager,
-        motion_detector,
-        object_detector,
-        object_tracker,
-        detected_objects_queue,
-        camera_metrics,
-        objects_to_track,
-        object_filters,
-        stop_event,
-        ptz_metrics,
-        region_grid,
-    )
-
-    # empty the frame queue
-    logger.info(f"{name}: emptying frame queue")
-    while not frame_queue.empty():
-        frame_time = frame_queue.get(False)
-        frame_manager.delete(f"{name}{frame_time}")
-
-    logger.info(f"{name}: exiting subprocess")
-
-
-def detect(
-    detect_config: DetectConfig,
-    object_detector,
-    frame,
-    model_config,
-    region,
-    objects_to_track,
-    object_filters,
-):
-    tensor_input = create_tensor_input(frame, model_config, region)
-
-    detections = []
-    region_detections = object_detector.detect(tensor_input)
-    for d in region_detections:
-        box = d[2]
-        size = region[2] - region[0]
-        x_min = int(max(0, (box[1] * size) + region[0]))
-        y_min = int(max(0, (box[0] * size) + region[1]))
-        x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
-        y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))
-
-        # ignore objects that were detected outside the frame
-        if (x_min >= detect_config.width - 1) or (y_min >= detect_config.height - 1):
-            continue
-
-        width = x_max - x_min
-        height = y_max - y_min
-        area = width * height
-        ratio = width / max(1, height)
-        det = (
-            d[0],
-            d[1],
-            (x_min, y_min, x_max, y_max),
-            area,
-            ratio,
-            region,
-        )
-        # apply object filters
-        if is_object_filtered(det, objects_to_track, object_filters):
-            continue
-
-        detections.append(det)
-
-    return detections
-
-
-def process_frames(
-    camera_name: str,
-    requestor: InterProcessRequestor,
-    frame_queue: mp.Queue,
-    frame_shape,
-    model_config: ModelConfig,
-    detect_config: DetectConfig,
-    frame_manager: FrameManager,
-    motion_detector: MotionDetector,
-    object_detector: RemoteObjectDetector,
-    object_tracker: ObjectTracker,
-    detected_objects_queue: mp.Queue,
-    camera_metrics: CameraMetrics,
-    objects_to_track: list[str],
-    object_filters,
-    stop_event,
-    ptz_metrics: PTZMetrics,
-    region_grid,
-    exit_on_empty: bool = False,
-):
+        detect_config = self.camera_config.detect
+
         next_region_update = get_tomorrow_at_time(2)
-    config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}")
+        config_subscriber = ConfigSubscriber(f"config/detect/{self.camera_config.name}")
 
         fps_tracker = EventsPerSecond()
         fps_tracker.start()
@@ -502,9 +408,9 @@ def process_frames(
         startup_scan = True
         stationary_frame_counter = 0
 
-    region_min_size = get_min_region_size(model_config)
+        region_min_size = get_min_region_size(self.model_config)
 
-    while not stop_event.is_set():
+        while not self.stop_event.is_set():
             # check for updated detect config
             _, updated_detect_config = config_subscriber.check_for_update()
@@ -515,29 +421,28 @@ def process_frames(
                 datetime.datetime.now().astimezone(datetime.timezone.utc)
                 > next_region_update
             ):
-            region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_name)
+                self.region_grid = requestor.send_data(
+                    REQUEST_REGION_GRID, self.camera_config.name
+                )
                 next_region_update = get_tomorrow_at_time(2)
 
             try:
-            if exit_on_empty:
-                frame_time = frame_queue.get(False)
-            else:
-                frame_time = frame_queue.get(True, 1)
+                frame_time = self.camera_metrics.frame_queue.get(timeout=1)
             except queue.Empty:
-            if exit_on_empty:
-                logger.info("Exiting track_objects...")
-                break
                 continue
 
-        camera_metrics.detection_frame.value = frame_time
-        ptz_metrics.frame_time.value = frame_time
+            self.camera_metrics.detection_frame.value = frame_time
+            self.ptz_metrics.frame_time.value = frame_time
 
             frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
+                f"{self.camera_config.name}{frame_time}",
+                (frame_shape[0] * 3 // 2, frame_shape[1]),
             )
 
             if frame is None:
-            logger.debug(f"{camera_name}: frame {frame_time} is not in memory store.")
+                self.logger.debug(
+                    f"{self.camera_config.name}: frame {frame_time} is not in memory store."
+                )
                 continue
 
             # look for motion if enabled
@@ -584,7 +489,9 @@ def process_frames(
                 for obj in object_tracker.tracked_objects.values()
                 if obj["id"] not in stationary_object_ids
             ]
-        object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes
+            object_boxes = (
+                tracked_object_boxes + object_tracker.untracked_object_boxes
+            )
 
             # get consolidated regions for tracked objects
             regions = [
@@ -598,10 +505,13 @@ def process_frames(
 
             # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking
             # ptz_moving_at_frame_time() always returns False for non-autotracking cameras
-        if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time(
+            if (
+                not motion_detector.is_calibrating()
+                and not ptz_moving_at_frame_time(
                     frame_time,
-            ptz_metrics.start_time.value,
-            ptz_metrics.stop_time.value,
+                    self.ptz_metrics.start_time.value,
+                    self.ptz_metrics.stop_time.value,
+                )
             ):
                 # find motion boxes that are not inside tracked object regions
                 standalone_motion_boxes = [
@@ -620,7 +530,7 @@ def process_frames(
                         region_min_size,
                         candidate,
                         standalone_motion_boxes,
-                    region_grid,
+                        self.region_grid,
                     )
                     for candidate in motion_clusters
                 ]
@@ -629,7 +539,7 @@ def process_frames(
             # if starting up, get the next startup scan region
             if startup_scan:
                 for region in get_startup_regions(
-                frame_shape, region_min_size, region_grid
+                    frame_shape, region_min_size, self.region_grid
                 ):
                     regions.append(region)
                 startup_scan = False
@@ -651,14 +561,12 @@ def process_frames(
 
             for region in regions:
                 detections.extend(
-                detect(
+                    self.detect(
                         detect_config,
                         object_detector,
                         frame,
-                    model_config,
+                        self.model_config,
                         region,
-                    objects_to_track,
-                    object_filters,
                     )
                 )
 
@@ -669,7 +577,7 @@ def process_frames(
             tracked_detections = [
                 d
                 for d in consolidated_detections
-            if d[0] not in model_config.all_attributes
+                if d[0] not in self.model_config.all_attributes
             ]
             # now that we have refined our detections, we need to track objects
             object_tracker.match_and_update(frame_time, tracked_detections)
@@ -679,7 +587,7 @@ def process_frames(
 
             # group the attribute detections based on what label they apply to
             attribute_detections = {}
-        for label, attribute_labels in model_config.attributes_map.items():
+            for label, attribute_labels in self.model_config.attributes_map.items():
                 attribute_detections[label] = [
                     d for d in consolidated_detections if d[0] in attribute_labels
                 ]
@@ -740,7 +648,7 @@ def process_frames(
                 for obj in object_tracker.tracked_objects.values():
                     if obj["frame_time"] == frame_time:
                         thickness = 2
-                    color = model_config.colormap[obj["label"]]
+                        color = self.model_config.colormap[obj["label"]]
                     else:
                         thickness = 1
                         color = (255, 0, 0)
@@ -770,28 +678,85 @@ def process_frames(
                 )
 
                 cv2.imwrite(
-                f"debug/frames/{camera_name}-{'{:.6f}'.format(frame_time)}.jpg",
+                    f"debug/frames/{self.camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg",
                     bgr_frame,
                 )
 
             # add to the queue if not full
-        if detected_objects_queue.full():
-            frame_manager.delete(f"{camera_name}{frame_time}")
+            if self.detected_objects_queue.full():
+                frame_manager.delete(f"{self.camera_config.name}{frame_time}")
                 continue
             else:
                 fps_tracker.update()
-            camera_metrics.process_fps.value = fps_tracker.eps()
-            detected_objects_queue.put(
+                self.camera_metrics.process_fps.value = fps_tracker.eps()
+                self.detected_objects_queue.put(
                     (
-                    camera_name,
+                        self.camera_config.name,
                         frame_time,
                         detections,
                         motion_boxes,
                         regions,
                     )
                 )
-            camera_metrics.detection_fps.value = object_detector.fps.eps()
-            frame_manager.close(f"{camera_name}{frame_time}")
+                self.camera_metrics.detection_fps.value = object_detector.fps.eps()
+                frame_manager.close(f"{self.camera_config.name}{frame_time}")
 
         motion_detector.stop()
         requestor.stop()
         config_subscriber.stop()
+
+        # empty the frame queue
+        self.logger.info(f"{self.name}: emptying frame queue")
+        frame_queue = self.camera_metrics.frame_queue
+        while not frame_queue.empty():
+            frame_time = frame_queue.get(False)
+            frame_manager.delete(f"{self.name}{frame_time}")
+
+        self.logger.info(f"{self.name}: exiting subprocess")
+
+    def detect(
+        self,
+        detect_config: DetectConfig,
+        object_detector,
+        frame,
+        model_config,
+        region,
+    ):
+        tensor_input = create_tensor_input(frame, model_config, region)
+
+        detections = []
+        region_detections = object_detector.detect(tensor_input)
+        for d in region_detections:
+            box = d[2]
+            size = region[2] - region[0]
+            x_min = int(max(0, (box[1] * size) + region[0]))
+            y_min = int(max(0, (box[0] * size) + region[1]))
+            x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
+            y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))
+
+            # ignore objects that were detected outside the frame
+            if (x_min >= detect_config.width - 1) or (
+                y_min >= detect_config.height - 1
+            ):
+                continue
+
+            width = x_max - x_min
+            height = y_max - y_min
+            area = width * height
+            ratio = width / max(1, height)
+            det = (
+                d[0],
+                d[1],
+                (x_min, y_min, x_max, y_max),
+                area,
+                ratio,
+                region,
+            )
+            # apply object filters
+            if is_object_filtered(
+                det,
+                self.camera_config.objects.track,
+                self.camera_config.objects.filters,
+            ):
+                continue
+
+            detections.append(det)
+
+        return detections
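
A note on the coordinate math in CameraTracker.detect() above: the detector returns boxes normalized to the square region that was cropped for inference, as (y_min, x_min, y_max, x_max), and the method scales them by the region's edge length, shifts by the region's top-left corner, and clamps to the detect resolution. A worked example with invented numbers; only the formulas come from the code above:

# Worked example of the region-to-frame box mapping in CameraTracker.detect().
# The numbers are invented; the formulas mirror the method above.
region = (100, 200, 420, 520)   # square crop: x_min, y_min, x_max, y_max in frame pixels
box = (0.25, 0.50, 0.75, 1.00)  # detector output, normalized: y_min, x_min, y_max, x_max
size = region[2] - region[0]    # 420 - 100 = 320 (regions are square, so one edge suffices)

x_min = int(max(0, (box[1] * size) + region[0]))  # 0.50 * 320 + 100 = 260
y_min = int(max(0, (box[0] * size) + region[1]))  # 0.25 * 320 + 200 = 280
x_max = int((box[3] * size) + region[0])          # 1.00 * 320 + 100 = 420 (detect() also clamps to width - 1)
y_max = int((box[2] * size) + region[1])          # 0.75 * 320 + 200 = 440 (detect() also clamps to height - 1)

assert (x_min, y_min, x_max, y_max) == (260, 280, 420, 440)

The subsequent bounds check then discards boxes whose top-left corner already sits at or beyond the frame edge, since after clamping those can only be degenerate slivers.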