OmriAx 2025-03-10 10:55:59 +02:00
parent 469f9c0a83
commit c674d142dc


@@ -29,11 +29,18 @@ from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
-from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum, InputTensorEnum, PixelFormatEnum, InputDTypeEnum
+from frigate.detectors.detector_config import (
+BaseDetectorConfig,
+ModelTypeEnum,
+InputTensorEnum,
+PixelFormatEnum,
+InputDTypeEnum,
+)
from PIL import Image, ImageDraw, ImageFont
logger = logging.getLogger(__name__)
# ----------------- ResponseStore Class ----------------- #
class ResponseStore:
"""
@@ -41,6 +48,7 @@ class ResponseStore:
to their results. Threads can wait on the condition variable until
their request's result appears.
"""
def __init__(self):
self.responses = {} # Maps request_id -> (original_input, infer_results)
self.lock = threading.Lock()
@@ -53,12 +61,16 @@
def get(self, request_id, timeout=None):
with self.cond:
-if not self.cond.wait_for(lambda: request_id in self.responses, timeout=timeout):
+if not self.cond.wait_for(
+lambda: request_id in self.responses, timeout=timeout
+):
raise TimeoutError(f"Timeout waiting for response {request_id}")
return self.responses.pop(request_id)
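The store is a small producer/consumer rendezvous keyed by request id. A minimal usage sketch, using only the attributes visible in this diff (the real producer side is the elided put()/inference-callback path):

import threading

store = ResponseStore()

def producer(request_id, frame, results):
    # What the elided producer path is expected to do: record the result under
    # its request id and wake any thread blocked in get().
    with store.cond:
        store.responses[request_id] = (frame, results)
        store.cond.notify_all()

threading.Timer(0.1, producer, args=(42, "frame-42", {"scores": [0.9]})).start()

# Consumer side: block until request 42's result arrives, or raise TimeoutError.
original_input, infer_results = store.get(42, timeout=5.0)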
# ----------------- Utility Functions ----------------- #
def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
"""
Resize an image with unchanged aspect ratio using padding.
@@ -78,9 +90,12 @@ def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarr
padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
x_offset = (model_w - new_w) // 2
y_offset = (model_h - new_h) // 2
-padded_image[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = resized_image
+padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
+resized_image
+)
return padded_image
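A quick numeric illustration of the letterbox arithmetic (illustrative only; it assumes the elided resize step uses the usual min(model_w / w, model_h / h) scale): a 1280x720 frame going into a 640x640 input is scaled by 0.5 to 640x360, then centered with 140 rows of value-114 padding above and below.

import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # H x W x C
padded = preprocess_tensor(frame, 640, 640)

assert padded.shape == (640, 640, 3)
# y_offset = (640 - 360) // 2 = 140, so the top and bottom bands keep the 114 fill.
assert (padded[:140] == 114).all() and (padded[-140:] == 114).all()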
# ----------------- Global Constants ----------------- #
DETECTOR_KEY = "hailo8l"
ARCH = None
@@ -89,13 +104,16 @@ H8L_DEFAULT_MODEL = "yolov6n.hef"
H8_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8/yolov6n.hef"
H8L_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8l/yolov6n.hef"
def detect_hailo_arch():
try:
-result = subprocess.run(['hailortcli', 'fw-control', 'identify'], capture_output=True, text=True)
+result = subprocess.run(
+["hailortcli", "fw-control", "identify"], capture_output=True, text=True
+)
if result.returncode != 0:
logger.error(f"Inference error: {result.stderr}")
return None
-for line in result.stdout.split('\n'):
+for line in result.stdout.split("\n"):
if "Device Architecture" in line:
if "HAILO8L" in line:
return "hailo8l"
@@ -107,6 +125,7 @@ def detect_hailo_arch():
logger.error(f"Inference error: {e}")
return None
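For context, the returned string lines up with the architecture-specific defaults declared above; a hypothetical lookup for illustration only (the detector's real selection logic sits outside this hunk):

arch = detect_hailo_arch()  # "hailo8", "hailo8l", or None on failure

# Illustrative mapping only; the commit's own logic may differ.
default_model_url = {"hailo8": H8_DEFAULT_URL, "hailo8l": H8L_DEFAULT_URL}.get(arch)
if default_model_url is None:
    raise RuntimeError("No supported Hailo device detected")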
# ----------------- HailoAsyncInference Class ----------------- #
class HailoAsyncInference:
def __init__(
@@ -139,11 +158,21 @@ class HailoAsyncInference:
def _set_input_type(self, input_type: Optional[str] = None) -> None:
self.infer_model.input().set_format_type(getattr(FormatType, input_type))
-def _set_output_type(self, output_type_dict: Optional[Dict[str, str]] = None) -> None:
+def _set_output_type(
+self, output_type_dict: Optional[Dict[str, str]] = None
+) -> None:
for output_name, output_type in output_type_dict.items():
-self.infer_model.output(output_name).set_format_type(getattr(FormatType, output_type))
+self.infer_model.output(output_name).set_format_type(
+getattr(FormatType, output_type)
+)
-def callback(self, completion_info, bindings_list: List, input_batch: List, request_ids: List[int]):
+def callback(
+self,
+completion_info,
+bindings_list: List,
+input_batch: List,
+request_ids: List[int],
+):
if completion_info.exception:
logger.error(f"Inference error: {completion_info.exception}")
else:
@@ -162,7 +191,9 @@ class HailoAsyncInference:
output_buffers = {
output_info.name: np.empty(
self.infer_model.output(output_info.name).shape,
-dtype=getattr(np, str(output_info.format.type).split(".")[1].lower())
+dtype=getattr(
+np, str(output_info.format.type).split(".")[1].lower()
+),
)
for output_info in self.hef.get_output_vstream_infos()
}
@@ -170,7 +201,7 @@ class HailoAsyncInference:
output_buffers = {
name: np.empty(
self.infer_model.output(name).shape,
-dtype=getattr(np, self.output_type[name].lower())
+dtype=getattr(np, self.output_type[name].lower()),
)
for name in self.output_type
}
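Both output-buffer branches come down to the same trick: map a format name onto the matching NumPy dtype with getattr. A standalone illustration, assuming the HailoRT enum stringifies as "FormatType.FLOAT32":

import numpy as np

# First branch: derive the dtype from the stream's format enum.
enum_repr = "FormatType.FLOAT32"  # assumed value of str(output_info.format.type)
assert getattr(np, enum_repr.split(".")[1].lower()) is np.float32

# Second branch: derive it from the configured output-type name.
assert getattr(np, "UINT8".lower()) is np.uint8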
@@ -203,25 +234,50 @@ class HailoAsyncInference:
input_batch=input_batch,
request_ids=request_ids,
bindings_list=bindings_list,
-)
+),
)
job.wait(100)
# ----------------- HailoDetector Class ----------------- #
class HailoDetector(DetectionApi):
type_key = DETECTOR_KEY
-def __init__(self, detector_config: 'HailoDetectorConfig'):
+def __init__(self, detector_config: "HailoDetectorConfig"):
global ARCH
ARCH = detect_hailo_arch()
self.cache_dir = MODEL_CACHE_DIR
self.device_type = detector_config.device
-self.model_height = detector_config.model.height if hasattr(detector_config.model, "height") else None
-self.model_width = detector_config.model.width if hasattr(detector_config.model, "width") else None
-self.model_type = detector_config.model.model_type if hasattr(detector_config.model, "model_type") else None
-self.tensor_format = detector_config.model.input_tensor if hasattr(detector_config.model, "input_tensor") else None
-self.pixel_format = detector_config.model.input_pixel_format if hasattr(detector_config.model, "input_pixel_format") else None
-self.input_dtype = detector_config.model.input_dtype if hasattr(detector_config.model, "input_dtype") else None
+self.model_height = (
+detector_config.model.height
+if hasattr(detector_config.model, "height")
+else None
+)
+self.model_width = (
+detector_config.model.width
+if hasattr(detector_config.model, "width")
+else None
+)
+self.model_type = (
+detector_config.model.model_type
+if hasattr(detector_config.model, "model_type")
+else None
+)
+self.tensor_format = (
+detector_config.model.input_tensor
+if hasattr(detector_config.model, "input_tensor")
+else None
+)
+self.pixel_format = (
+detector_config.model.input_pixel_format
+if hasattr(detector_config.model, "input_pixel_format")
+else None
+)
+self.input_dtype = (
+detector_config.model.input_dtype
+if hasattr(detector_config.model, "input_dtype")
+else None
+)
self.output_type = "FLOAT32"
self.set_path_and_url(detector_config.model.path)
self.working_model_path = self.check_and_prepare()
@@ -238,11 +294,13 @@ class HailoDetector(DetectionApi):
self.working_model_path,
self.input_queue,
self.response_store,
-self.batch_size
+self.batch_size,
)
self.input_shape = self.inference_engine.get_input_shape()
logger.debug(f"[INIT] Model input shape: {self.input_shape}")
-self.inference_thread = threading.Thread(target=self.inference_engine.run, daemon=True)
+self.inference_thread = threading.Thread(
+target=self.inference_engine.run, daemon=True
+)
self.inference_thread.start()
except Exception as e:
logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
@@ -261,7 +319,11 @@ class HailoDetector(DetectionApi):
self.url = None
def is_url(self, url: str) -> bool:
-return url.startswith("http://") or url.startswith("https://") or url.startswith("www.")
+return (
+url.startswith("http://")
+or url.startswith("https://")
+or url.startswith("www.")
+)
@staticmethod
def extract_model_name(path: str = None, url: str = None) -> str:
@@ -328,9 +390,13 @@ class HailoDetector(DetectionApi):
self.input_queue.put((request_id, tensor_input))
try:
-original_input, infer_results = self.response_store.get(request_id, timeout=10.0)
+original_input, infer_results = self.response_store.get(
+request_id, timeout=10.0
+)
except TimeoutError:
logger.error(f"Timeout waiting for inference results for request {request_id}")
logger.error(
f"Timeout waiting for inference results for request {request_id}"
)
return np.zeros((20, 6), dtype=np.float32)
if isinstance(infer_results, list) and len(infer_results) == 1:
@@ -363,7 +429,9 @@ class HailoDetector(DetectionApi):
def preprocess(self, image):
if isinstance(image, np.ndarray):
-processed = preprocess_tensor(image, self.input_shape[1], self.input_shape[0])
+processed = preprocess_tensor(
+image, self.input_shape[1], self.input_shape[0]
+)
return np.expand_dims(processed, axis=0)
else:
raise ValueError("Unsupported image format for preprocessing")
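Taken together with detect_raw above, the caller-facing contract is a fixed-size result: the request is queued, the ResponseStore is waited on for up to 10 s, and the zero-filled fallback pins the output shape. A hedged sketch of that contract (detector and tensor_input are assumed here, not defined in this diff):

import numpy as np

def run_one(detector, tensor_input):
    # Illustrative only: detector is an initialized HailoDetector and
    # tensor_input is a frame prepared the way it expects (see preprocess()).
    detections = detector.detect_raw(tensor_input)
    # On timeout this is the all-zero array above; successful inference is
    # expected to produce the same (20, 6) shape.
    assert detections.shape == (20, 6)
    return detections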
@@ -384,6 +452,7 @@ class HailoDetector(DetectionApi):
"""Destructor to ensure cleanup when the object is deleted."""
self.close()
# ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]