Add OpenVINO support for the D-FINE model

This commit is contained in:
Jason Hunter 2025-03-19 00:02:13 -04:00
parent 5514fc11b9
commit b5bc75391c

View File

@ -10,7 +10,7 @@ from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolov9 from frigate.util.model import post_process_dfine, post_process_yolov9
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -29,6 +29,7 @@ class OvDetector(DetectionApi):
ModelTypeEnum.yolonas, ModelTypeEnum.yolonas,
ModelTypeEnum.yolov9, ModelTypeEnum.yolov9,
ModelTypeEnum.yolox, ModelTypeEnum.yolox,
ModelTypeEnum.dfine,
] ]
def __init__(self, detector_config: OvDetectorConfig): def __init__(self, detector_config: OvDetectorConfig):
@ -163,6 +164,21 @@ class OvDetector(DetectionApi):
infer_request = self.interpreter.create_infer_request() infer_request = self.interpreter.create_infer_request()
# TODO: see if we can use shared_memory=True # TODO: see if we can use shared_memory=True
input_tensor = ov.Tensor(array=tensor_input) input_tensor = ov.Tensor(array=tensor_input)
if self.ov_model_type == ModelTypeEnum.dfine:
infer_request.set_tensor("images", input_tensor)
target_sizes_tensor = ov.Tensor(
np.array([[self.h, self.w]], dtype=np.int64)
)
infer_request.set_tensor("orig_target_sizes", target_sizes_tensor)
infer_request.infer()
tensor_output = (
infer_request.get_output_tensor(0).data,
infer_request.get_output_tensor(1).data,
infer_request.get_output_tensor(2).data,
)
return post_process_dfine(tensor_output, self.w, self.h)
infer_request.infer(input_tensor) infer_request.infer(input_tensor)
detections = np.zeros((20, 6), np.float32) detections = np.zeros((20, 6), np.float32)