Move track_camera into CameraTracker process

George Tsiamasiotis 2024-10-02 09:17:45 +03:00
parent abe4ce43f3
commit 4fc970dbdd
2 changed files with 392 additions and 434 deletions
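The change is a standard multiprocessing refactor: a module-level function that was handed to util.Process as a target becomes a util.Process subclass, with the target's arguments captured in __init__ and its body moved into run(). A minimal sketch of the pattern, using illustrative names and assuming util.Process behaves like multiprocessing.Process:

    import multiprocessing as mp

    # Before: a free function spawned with Process(target=track_camera, args=(...)).
    def track_camera(name, config, stop_event):
        while not stop_event.is_set():
            pass  # per-frame work

    # After: the process owns its arguments and its loop.
    class CameraTracker(mp.Process):
        def __init__(self, name, config):
            super().__init__(name=f"frigate.process:{name}", daemon=True)
            self.camera_name = name
            self.config = config
            self.stop_event = mp.Event()

        def run(self):  # executes in the child process after start()
            while not self.stop_event.is_set():
                pass  # per-frame work

This removes the long positional argument tuple at every call site and lets shared state live on self instead of being threaded through process_frames as parameters.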

View File

@@ -6,7 +6,7 @@ from typing import Optional
from frigate import util
from frigate.config import FrigateConfig
from frigate.util.object import get_camera_regions_grid
from frigate.video import CameraWatchdog, track_camera
from frigate.video import CameraTracker, CameraWatchdog
from .metrics import CameraMetrics, PTZMetrics
@@ -44,22 +44,15 @@ class Camera:
logger.info(f"Camera processor not started for disabled camera {self.name}")
return
camera_process = util.Process(
target=track_camera,
name=f"camera_processor:{self.name}",
args=(
self.name,
camera_process = CameraTracker(
self.config.cameras[self.name],
self.config.model,
self.config.model.merged_labelmap,
detection_queue,
self.detection_out_event,
detected_frames_queue,
self.camera_metrics,
self.ptz_metrics,
self.region_grid,
),
daemon=True,
)
self.process = camera_process
self.camera_metrics.process_pid.value = camera_process.pid or 0
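Note that Process.pid is None until start() is called, which is what the `or 0` guard above absorbs; whether this call site starts the process immediately is not visible in the hunk. A runnable illustration:

    import multiprocessing as mp

    def _noop():
        pass

    if __name__ == "__main__":
        proc = mp.Process(target=_noop, daemon=True)
        assert proc.pid is None           # no OS pid before start()
        proc.start()
        assert isinstance(proc.pid, int)  # real pid once the child exists
        proc.join()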

View File

@@ -3,12 +3,11 @@ import logging
import multiprocessing as mp
import os
import queue
import signal
import subprocess as sp
import threading
from multiprocessing.synchronize import Event
import cv2
from setproctitle import setproctitle
from frigate import util
from frigate.camera.metrics import CameraMetrics, PTZMetrics
@@ -21,15 +20,12 @@ from frigate.const import (
REQUEST_REGION_GRID,
)
from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection import RemoteObjectDetector
from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
from frigate.util.image import (
FrameManager,
SharedMemoryFrameManager,
draw_box_with_label,
)
@@ -46,9 +42,6 @@ from frigate.util.object import (
is_object_filtered,
reduce_detections,
)
from frigate.util.services import listen
logger = logging.getLogger(__name__)
def stop_ffmpeg(ffmpeg_process, logger):
@@ -285,6 +278,7 @@ class CameraCapture(threading.Thread):
):
super().__init__(name=f"capture:{config.name}")
self.logger = logging.getLogger(self.name)
self.config = config
self.shm_frame_count = shm_frame_count
self.frame_shape = frame_shape
@@ -328,12 +322,12 @@
if self.stop_event.is_set():
break
logger.error(
self.logger.error(
f"{self.config.name}: Unable to read frames from ffmpeg process."
)
if self.ffmpeg_process.poll() is not None:
logger.error(
self.logger.error(
f"{self.config.name}: ffmpeg process is not running. exiting capture thread..."
)
break
@@ -356,145 +350,57 @@
self.frame_manager.delete(frame)
def track_camera(
name,
config: CameraConfig,
model_config,
labelmap,
detection_queue,
result_connection,
detected_objects_queue,
class CameraTracker(util.Process):
def __init__(
self,
camera_config: CameraConfig,
model_config: ModelConfig,
detection_queue: mp.Queue,
result_connection: Event,
detected_objects_queue: mp.Queue,
camera_metrics: CameraMetrics,
ptz_metrics: PTZMetrics,
region_grid,
):
stop_event = mp.Event()
region_grid: list[list[dict[str, int]]],
):
super().__init__(name=f"frigate.process:{camera_config.name}", daemon=True)
self.camera_config = camera_config
self.model_config = model_config
self.labelmap = model_config.merged_labelmap
self.detection_queue = detection_queue
self.result_connection = result_connection
self.detected_objects_queue = detected_objects_queue
self.camera_metrics = camera_metrics
self.ptz_metrics = ptz_metrics
self.region_grid = region_grid
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
threading.current_thread().name = f"process:{name}"
setproctitle(f"frigate.process:{name}")
listen()
frame_queue = camera_metrics.frame_queue
frame_shape = config.frame_shape
objects_to_track = config.objects.track
object_filters = config.objects.filters
def run(self):
frame_shape = self.camera_config.frame_shape
motion_detector = ImprovedMotionDetector(
frame_shape, config.motion, config.detect.fps, name=config.name
frame_shape,
self.camera_config.motion,
self.camera_config.detect.fps,
name=self.camera_config.name,
)
object_detector = RemoteObjectDetector(
name, labelmap, detection_queue, result_connection, model_config, stop_event
self.camera_config.name,
self.labelmap,
self.detection_queue,
self.result_connection,
self.model_config,
self.stop_event,
)
object_tracker = NorfairTracker(config, ptz_metrics)
object_tracker = NorfairTracker(self.camera_config, self.ptz_metrics)
frame_manager = SharedMemoryFrameManager()
# create communication for region grid updates
requestor = InterProcessRequestor()
process_frames(
name,
requestor,
frame_queue,
frame_shape,
model_config,
config.detect,
frame_manager,
motion_detector,
object_detector,
object_tracker,
detected_objects_queue,
camera_metrics,
objects_to_track,
object_filters,
stop_event,
ptz_metrics,
region_grid,
)
# empty the frame queue
logger.info(f"{name}: emptying frame queue")
while not frame_queue.empty():
frame_time = frame_queue.get(False)
frame_manager.delete(f"{name}{frame_time}")
logger.info(f"{name}: exiting subprocess")
def detect(
detect_config: DetectConfig,
object_detector,
frame,
model_config,
region,
objects_to_track,
object_filters,
):
tensor_input = create_tensor_input(frame, model_config, region)
detections = []
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
size = region[2] - region[0]
x_min = int(max(0, (box[1] * size) + region[0]))
y_min = int(max(0, (box[0] * size) + region[1]))
x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))
# ignore objects that were detected outside the frame
if (x_min >= detect_config.width - 1) or (y_min >= detect_config.height - 1):
continue
width = x_max - x_min
height = y_max - y_min
area = width * height
ratio = width / max(1, height)
det = (
d[0],
d[1],
(x_min, y_min, x_max, y_max),
area,
ratio,
region,
)
# apply object filters
if is_object_filtered(det, objects_to_track, object_filters):
continue
detections.append(det)
return detections
def process_frames(
camera_name: str,
requestor: InterProcessRequestor,
frame_queue: mp.Queue,
frame_shape,
model_config: ModelConfig,
detect_config: DetectConfig,
frame_manager: FrameManager,
motion_detector: MotionDetector,
object_detector: RemoteObjectDetector,
object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue,
camera_metrics: CameraMetrics,
objects_to_track: list[str],
object_filters,
stop_event,
ptz_metrics: PTZMetrics,
region_grid,
exit_on_empty: bool = False,
):
detect_config = self.camera_config.detect
next_region_update = get_tomorrow_at_time(2)
config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}")
config_subscriber = ConfigSubscriber(f"config/detect/{self.camera_config.name}")
fps_tracker = EventsPerSecond()
fps_tracker.start()
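The hunk above also drops the hand-rolled SIGTERM/SIGINT handlers that track_camera installed; the loop now only checks self.stop_event, which the util.Process base class is presumably responsible for setting. This diff does not show util.Process itself, but a minimal sketch of how a base class could centralize that by wrapping each subclass's run():

    import multiprocessing as mp
    import signal

    class Process(mp.Process):
        """Sketch: turn SIGTERM/SIGINT into a cooperative stop flag."""

        def __init_subclass__(cls, **kwargs):
            super().__init_subclass__(**kwargs)
            inner_run = cls.run

            def run_wrapper(self):
                # Runs in the child: install handlers before the subclass loop.
                def on_signal(signum, frame):
                    self.stop_event.set()

                signal.signal(signal.SIGTERM, on_signal)
                signal.signal(signal.SIGINT, on_signal)
                inner_run(self)

            cls.run = run_wrapper  # a real implementation would guard against
                                   # wrapping twice in deeper hierarchies

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.stop_event = mp.Event()

With something like this in place, every subclass loop reduces to `while not self.stop_event.is_set():`, as CameraTracker.run() does.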
@@ -502,9 +408,9 @@ def process_frames(
startup_scan = True
stationary_frame_counter = 0
region_min_size = get_min_region_size(model_config)
region_min_size = get_min_region_size(self.model_config)
while not stop_event.is_set():
while not self.stop_event.is_set():
# check for updated detect config
_, updated_detect_config = config_subscriber.check_for_update()
@@ -515,29 +421,28 @@
datetime.datetime.now().astimezone(datetime.timezone.utc)
> next_region_update
):
region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_name)
self.region_grid = requestor.send_data(
REQUEST_REGION_GRID, self.camera_config.name
)
next_region_update = get_tomorrow_at_time(2)
try:
if exit_on_empty:
frame_time = frame_queue.get(False)
else:
frame_time = frame_queue.get(True, 1)
frame_time = self.camera_metrics.frame_queue.get(timeout=1)
except queue.Empty:
if exit_on_empty:
logger.info("Exiting track_objects...")
break
continue
camera_metrics.detection_frame.value = frame_time
ptz_metrics.frame_time.value = frame_time
self.camera_metrics.detection_frame.value = frame_time
self.ptz_metrics.frame_time.value = frame_time
frame = frame_manager.get(
f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
f"{self.camera_config.name}{frame_time}",
(frame_shape[0] * 3 // 2, frame_shape[1]),
)
if frame is None:
logger.debug(f"{camera_name}: frame {frame_time} is not in memory store.")
self.logger.debug(
f"{self.camera_config.name}: frame {frame_time} is not in memory store."
)
continue
# look for motion if enabled
@@ -584,7 +489,9 @@
for obj in object_tracker.tracked_objects.values()
if obj["id"] not in stationary_object_ids
]
object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes
object_boxes = (
tracked_object_boxes + object_tracker.untracked_object_boxes
)
# get consolidated regions for tracked objects
regions = [
@@ -598,10 +505,13 @@
# only add in the motion boxes when not calibrating and a ptz is not moving via autotracking
# ptz_moving_at_frame_time() always returns False for non-autotracking cameras
if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time(
if (
not motion_detector.is_calibrating()
and not ptz_moving_at_frame_time(
frame_time,
ptz_metrics.start_time.value,
ptz_metrics.stop_time.value,
self.ptz_metrics.start_time.value,
self.ptz_metrics.stop_time.value,
)
):
# find motion boxes that are not inside tracked object regions
standalone_motion_boxes = [
@@ -620,7 +530,7 @@
region_min_size,
candidate,
standalone_motion_boxes,
region_grid,
self.region_grid,
)
for candidate in motion_clusters
]
@@ -629,7 +539,7 @@
# if starting up, get the next startup scan region
if startup_scan:
for region in get_startup_regions(
frame_shape, region_min_size, region_grid
frame_shape, region_min_size, self.region_grid
):
regions.append(region)
startup_scan = False
@@ -651,14 +561,12 @@
for region in regions:
detections.extend(
detect(
self.detect(
detect_config,
object_detector,
frame,
model_config,
self.model_config,
region,
objects_to_track,
object_filters,
)
)
@@ -669,7 +577,7 @@
tracked_detections = [
d
for d in consolidated_detections
if d[0] not in model_config.all_attributes
if d[0] not in self.model_config.all_attributes
]
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(frame_time, tracked_detections)
@@ -679,7 +587,7 @@
# group the attribute detections based on what label they apply to
attribute_detections = {}
for label, attribute_labels in model_config.attributes_map.items():
for label, attribute_labels in self.model_config.attributes_map.items():
attribute_detections[label] = [
d for d in consolidated_detections if d[0] in attribute_labels
]
@@ -740,7 +648,7 @@
for obj in object_tracker.tracked_objects.values():
if obj["frame_time"] == frame_time:
thickness = 2
color = model_config.colormap[obj["label"]]
color = self.model_config.colormap[obj["label"]]
else:
thickness = 1
color = (255, 0, 0)
@@ -770,28 +678,85 @@
)
cv2.imwrite(
f"debug/frames/{camera_name}-{'{:.6f}'.format(frame_time)}.jpg",
f"debug/frames/{self.camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg",
bgr_frame,
)
# add to the queue if not full
if detected_objects_queue.full():
frame_manager.delete(f"{camera_name}{frame_time}")
if self.detected_objects_queue.full():
frame_manager.delete(f"{self.camera_config.name}{frame_time}")
continue
else:
fps_tracker.update()
camera_metrics.process_fps.value = fps_tracker.eps()
detected_objects_queue.put(
self.camera_metrics.process_fps.value = fps_tracker.eps()
self.detected_objects_queue.put(
(
camera_name,
self.camera_config.name,
frame_time,
detections,
motion_boxes,
regions,
)
)
camera_metrics.detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")
self.camera_metrics.detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{self.camera_config.name}{frame_time}")
motion_detector.stop()
requestor.stop()
config_subscriber.stop()
# empty the frame queue
self.logger.info(f"{self.name}: emptying frame queue")
frame_queue = self.camera_metrics.frame_queue
while not frame_queue.empty():
frame_time = frame_queue.get(False)
frame_manager.delete(f"{self.name}{frame_time}")
self.logger.info(f"{self.name}: exiting subprocess")
def detect(
self,
detect_config: DetectConfig,
object_detector,
frame,
model_config,
region,
):
tensor_input = create_tensor_input(frame, model_config, region)
detections = []
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
size = region[2] - region[0]
x_min = int(max(0, (box[1] * size) + region[0]))
y_min = int(max(0, (box[0] * size) + region[1]))
x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))
# ignore objects that were detected outside the frame
if (x_min >= detect_config.width - 1) or (
y_min >= detect_config.height - 1
):
continue
width = x_max - x_min
height = y_max - y_min
area = width * height
ratio = width / max(1, height)
det = (
d[0],
d[1],
(x_min, y_min, x_max, y_max),
area,
ratio,
region,
)
# apply object filters
if is_object_filtered(
det,
self.camera_config.objects.track,
self.camera_config.objects.filters,
):
continue
detections.append(det)
return detections
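For reference, the box math in detect() maps a detector box, normalized to the square crop, back into absolute frame pixels: scale by the region's side length, translate by the region's origin, then clamp to the detect resolution. A worked example with assumed numbers (a 640x640 region at offset (100, 200) on a 1280x720 detect stream):

    # Detector boxes are (y_min, x_min, y_max, x_max), normalized 0..1
    # relative to the square region cropped from the frame.
    region = (100, 200, 740, 840)   # (x_min, y_min, x_max, y_max) in frame pixels
    box = (0.25, 0.10, 0.95, 0.50)
    size = region[2] - region[0]    # 640; regions are square, so one side suffices
    width, height = 1280, 720       # detect_config.width / detect_config.height

    x_min = int(max(0, box[1] * size + region[0]))           # 164
    y_min = int(max(0, box[0] * size + region[1]))           # 360
    x_max = int(min(width - 1, box[3] * size + region[0]))   # 420
    y_max = int(min(height - 1, box[2] * size + region[1]))  # 808, clamped to 719

    # Derived values fed to the object filters:
    area = (x_max - x_min) * (y_max - y_min)                 # 256 * 359 = 91904
    ratio = (x_max - x_min) / max(1, y_max - y_min)          # ~0.71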