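"""Object detection wrappers and detection process runners for Frigate.

Local detector classes wrap the configured detector API, while the runner
processes read frames from shared memory, run inference, and publish results.
"""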
import datetime
import logging
import queue
import threading
import time
from abc import ABC, abstractmethod
from collections import deque
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent

import numpy as np
import zmq

from frigate.comms.object_detector_signaler import (
    ObjectDetectorPublisher,
    ObjectDetectorSubscriber,
)
from frigate.config import FrigateConfig
from frigate.const import PROCESS_PRIORITY_HIGH
from frigate.detectors import create_detector
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
    InputDTypeEnum,
    ModelConfig,
)
from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.process import FrigateProcess

from .util import tensor_transform

logger = logging.getLogger(__name__)


class ObjectDetector(ABC):
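    """Abstract interface that all object detector implementations satisfy."""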
    @abstractmethod
    def detect(self, tensor_input, threshold: float = 0.4):
        pass


class BaseLocalDetector(ObjectDetector):
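    """Shared setup for local detectors: labels, input transforms, and the detector API."""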
    def __init__(
        self,
        detector_config: BaseDetectorConfig | None = None,
        labels: str | None = None,
    ):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        if detector_config:
            self.input_transform = tensor_transform(detector_config.model.input_tensor)
            self.dtype = detector_config.model.input_dtype
        else:
            self.input_transform = None
            self.dtype = InputDTypeEnum.int

        self.detect_api = create_detector(detector_config)

    def _transform_input(self, tensor_input: np.ndarray) -> np.ndarray:
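        """Apply the configured tensor layout transpose and input dtype conversion."""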
        if self.input_transform:
            tensor_input = np.transpose(tensor_input, self.input_transform)

        if self.dtype == InputDTypeEnum.float:
            tensor_input = tensor_input.astype(np.float32)
            tensor_input /= 255
        elif self.dtype == InputDTypeEnum.float_denorm:
            tensor_input = tensor_input.astype(np.float32)

        return tensor_input

    def detect(self, tensor_input: np.ndarray, threshold=0.4):
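        """Map raw detections to (label, score, box) tuples, stopping at the first score below the threshold."""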
        detections = []

        raw_detections = self.detect_raw(tensor_input)

        for d in raw_detections:
            if int(d[0]) < 0 or int(d[0]) >= len(self.labels):
                logger.warning(f"Raw Detect returned invalid label: {d}")
                continue
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        self.fps.update()
        return detections


class LocalObjectDetector(BaseLocalDetector):
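    """Synchronous detector that runs inference inline through the configured detector API."""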
    def detect_raw(self, tensor_input: np.ndarray):
        tensor_input = self._transform_input(tensor_input)
        return self.detect_api.detect_raw(tensor_input=tensor_input)


class AsyncLocalObjectDetector(BaseLocalDetector):
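    """Detector wrapper for detector APIs that support asynchronous send/receive inference."""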
    def async_send_input(self, tensor_input: np.ndarray, connection_id: str):
        tensor_input = self._transform_input(tensor_input)
        return self.detect_api.send_input(connection_id, tensor_input)

    def async_receive_output(self):
        return self.detect_api.receive_output()


class DetectorRunner(FrigateProcess):
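    """Detection process that pulls frame ids from the detection queue, runs
    inference synchronously, and publishes results through shared memory."""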
    def __init__(
        self,
        name,
        detection_queue: Queue,
        cameras: list[str],
        avg_speed: Value,
        start_time: Value,
        config: FrigateConfig,
        detector_config: BaseDetectorConfig,
        stop_event: MpEvent,
    ) -> None:
        super().__init__(stop_event, PROCESS_PRIORITY_HIGH, name=name, daemon=True)
        self.detection_queue = detection_queue
        self.cameras = cameras
        self.avg_speed = avg_speed
        self.start_time = start_time
        self.config = config
        self.detector_config = detector_config
        self.outputs: dict = {}

    def create_output_shm(self, name: str):
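        """Attach to the existing output shared memory for `name` as a 20x6 float32 array."""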
        out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False)
        out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
        self.outputs[name] = {"shm": out_shm, "np": out_np}

    def run(self) -> None:
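        """Main loop: wait for a frame id, read the frame from SHM, detect, and publish."""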
        self.pre_run_setup(self.config.logger)

        frame_manager = SharedMemoryFrameManager()
        object_detector = LocalObjectDetector(detector_config=self.detector_config)
        detector_publisher = ObjectDetectorPublisher()

        for name in self.cameras:
            self.create_output_shm(name)

        while not self.stop_event.is_set():
            try:
                connection_id = self.detection_queue.get(timeout=1)
            except queue.Empty:
                continue
            input_frame = frame_manager.get(
                connection_id,
                (
                    1,
                    self.detector_config.model.height,
                    self.detector_config.model.width,
                    3,
                ),
            )

            if input_frame is None:
                logger.warning(f"Failed to get frame {connection_id} from SHM")
                continue

            # detect and send the output
            self.start_time.value = datetime.datetime.now().timestamp()
            detections = object_detector.detect_raw(input_frame)
            duration = datetime.datetime.now().timestamp() - self.start_time.value
            frame_manager.close(connection_id)

            if connection_id not in self.outputs:
                self.create_output_shm(connection_id)

            self.outputs[connection_id]["np"][:] = detections[:]
            detector_publisher.publish(connection_id)
            self.start_time.value = 0.0

            self.avg_speed.value = (self.avg_speed.value * 9 + duration) / 10

        detector_publisher.stop()
        logger.info("Exited detection process...")


class AsyncDetectorRunner(FrigateProcess):
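    """Detection process that pipelines inference with separate send and receive worker threads."""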
    def __init__(
        self,
        name,
        detection_queue: Queue,
        cameras: list[str],
        avg_speed: Value,
        start_time: Value,
        config: FrigateConfig,
        detector_config: BaseDetectorConfig,
        stop_event: MpEvent,
    ) -> None:
        super().__init__(stop_event, PROCESS_PRIORITY_HIGH, name=name, daemon=True)
        self.detection_queue = detection_queue
        self.cameras = cameras
        self.avg_speed = avg_speed
        self.start_time = start_time
        self.config = config
        self.detector_config = detector_config
        self.outputs: dict = {}
        self._frame_manager: SharedMemoryFrameManager | None = None
        self._publisher: ObjectDetectorPublisher | None = None
        self._detector: AsyncLocalObjectDetector | None = None
        self.send_times = deque()

    def create_output_shm(self, name: str):
        out_shm = UntrackedSharedMemory(name=f"out-{name}", create=False)
        out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
        self.outputs[name] = {"shm": out_shm, "np": out_np}

    def _detect_worker(self) -> None:
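        """Pull frame ids from the detection queue and submit frames to the detector."""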
|
|
|
|
|
        logger.info("Starting Detect Worker Thread")
        while not self.stop_event.is_set():
            try:
                connection_id = self.detection_queue.get(timeout=1)
            except queue.Empty:
                continue

            input_frame = self._frame_manager.get(
                connection_id,
                (
                    1,
                    self.detector_config.model.height,
                    self.detector_config.model.width,
                    3,
                ),
            )

            if input_frame is None:
                logger.warning(f"Failed to get frame {connection_id} from SHM")
                continue

            # mark start time and send to accelerator
            self.send_times.append(time.perf_counter())
            self._detector.async_send_input(input_frame, connection_id)

    def _result_worker(self) -> None:
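        """Receive completed inferences, publish results, and track inference speed."""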
        logger.info("Starting Result Worker Thread")
        while not self.stop_event.is_set():
            connection_id, detections = self._detector.async_receive_output()

            if not self.send_times:
                # guard; shouldn't happen if send/recv are balanced
                continue
            ts = self.send_times.popleft()
            duration = time.perf_counter() - ts

            # release input buffer
            self._frame_manager.close(connection_id)

            if connection_id not in self.outputs:
                self.create_output_shm(connection_id)

            # write results and publish
            if detections is not None:
                self.outputs[connection_id]["np"][:] = detections[:]
                self._publisher.publish(connection_id)

            # update timers
            self.avg_speed.value = (self.avg_speed.value * 9 + duration) / 10
            self.start_time.value = 0.0

    def run(self) -> None:
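        """Set up shared resources, start the worker threads, and idle until stopped."""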
        self.pre_run_setup(self.config.logger)

        self._frame_manager = SharedMemoryFrameManager()
        self._publisher = ObjectDetectorPublisher()
        self._detector = AsyncLocalObjectDetector(detector_config=self.detector_config)

        for name in self.cameras:
            self.create_output_shm(name)

        t_detect = threading.Thread(target=self._detect_worker, daemon=True)
        t_result = threading.Thread(target=self._result_worker, daemon=True)
        t_detect.start()
        t_result.start()

        while not self.stop_event.is_set():
            time.sleep(0.5)

        self._publisher.stop()
        logger.info("Exited async detection process...")


class ObjectDetectProcess:
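    """Supervisor that owns a single detector process.

    Exposes shared values for the rolling average inference speed and the
    start time of the in-flight detection, and can stop or restart the
    underlying process via stop() and start_or_restart().
    """
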
    def __init__(
        self,
        name: str,
        detection_queue: Queue,
        cameras: list[str],
        config: FrigateConfig,
        detector_config: BaseDetectorConfig,
        stop_event: MpEvent,
    ):
        self.name = name
        self.cameras = cameras
        self.detection_queue = detection_queue
        self.avg_inference_speed = Value("d", 0.01)
        self.detection_start = Value("d", 0.0)
        self.detect_process: FrigateProcess | None = None
        self.config = config
        self.detector_config = detector_config
        self.stop_event = stop_event
        self.start_or_restart()

    def stop(self):
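        """Stop the detection process, force killing it if it does not exit in time."""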
        # if the process has already exited on its own, just return
        if self.detect_process and self.detect_process.exitcode is not None:
            return

        self.detect_process.terminate()
        logging.info("Waiting for detection process to exit gracefully...")
        self.detect_process.join(timeout=30)
        if self.detect_process.exitcode is None:
            logging.info("Detection process didn't exit. Force killing...")
            self.detect_process.kill()
            self.detect_process.join()
        logging.info("Detection process has exited...")

    def start_or_restart(self):
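        """Stop any running detector process and start a fresh one.

        MemryX detectors use the asynchronous runner; all other detector types
        use the synchronous DetectorRunner.
        """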
        self.detection_start.value = 0.0
        if (self.detect_process is not None) and self.detect_process.is_alive():
            self.stop()

        # Async path for MemryX
        if self.detector_config.type == "memryx":
            self.detect_process = AsyncDetectorRunner(
                f"frigate.detector:{self.name}",
                self.detection_queue,
                self.cameras,
                self.avg_inference_speed,
                self.detection_start,
                self.config,
                self.detector_config,
                self.stop_event,
            )
        else:
            self.detect_process = DetectorRunner(
                f"frigate.detector:{self.name}",
                self.detection_queue,
                self.cameras,
                self.avg_inference_speed,
                self.detection_start,
                self.config,
                self.detector_config,
                self.stop_event,
            )
        self.detect_process.start()


class RemoteObjectDetector:
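    """Client-side handle used by camera processes to request detections.

    Writes the input tensor into shared memory, signals the detector process
    through the detection queue, waits for a result notification over ZMQ,
    and reads the detections back out of the output shared memory buffer.
    """
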
    def __init__(
        self,
        name: str,
        labels: dict[int, str],
        detection_queue: Queue,
        model_config: ModelConfig,
        stop_event: MpEvent,
    ):
        self.labels = labels
        self.name = name
        self.fps = EventsPerSecond()
        self.detection_queue = detection_queue
        self.stop_event = stop_event
        self.shm = UntrackedSharedMemory(name=self.name, create=False)
        self.np_shm = np.ndarray(
            (1, model_config.height, model_config.width, 3),
            dtype=np.uint8,
            buffer=self.shm.buf,
        )
        self.out_shm = UntrackedSharedMemory(name=f"out-{self.name}", create=False)
        self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
        self.detector_subscriber = ObjectDetectorSubscriber(name)

    def detect(self, tensor_input, threshold=0.4):
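        """Run one detection request and return (label, score, box) tuples.

        Returns an empty list if shutdown has been requested or the detector
        does not respond before the subscriber times out.
        """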
        detections = []

        if self.stop_event.is_set():
            return detections

        # Drain any stale detection results from the ZMQ buffer before making a new request
        # This prevents reading detection results from a previous request
        # NOTE: stale results should not normally be present, but can occur in rare cases
        while True:
            try:
                self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
            except zmq.Again:
                break

        # copy input to shared memory
        self.np_shm[:] = tensor_input[:]
        self.detection_queue.put(self.name)
        result = self.detector_subscriber.check_for_update()

        # if it timed out
        if result is None:
            return detections

        for d in self.out_np_shm:
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        self.fps.update()
        return detections

    def cleanup(self):
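        """Stop the result subscriber and unlink this detector's shared memory buffers."""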
        self.detector_subscriber.stop()
        self.shm.unlink()
        self.out_shm.unlink()