detectors/openvino: allow f32 models, use util to process yolov8

Indrek Mandre 2024-02-11 11:38:53 +02:00
parent cd5f4b1534
commit 5dc83ab2da


@@ -7,6 +7,7 @@ from typing_extensions import Literal
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
+from frigate.detectors.util import preprocess, yolov8_postprocess
 
 logger = logging.getLogger(__name__)
@@ -33,7 +34,9 @@ class OvDetector(DetectionApi):
             model=self.ov_model, device_name=detector_config.device
         )
 
-        logger.info(f"Model Input Shape: {self.interpreter.input(0).shape}")
+        logger.info(
+            f"Model Input Shape: {self.interpreter.input(0).shape} {self.interpreter.input(0).element_type.to_dtype()}"
+        )
         self.output_indexes = 0
 
         while True:
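The widened log line reports both the input shape and the input element type of the compiled model, which is also what the new preprocess() call below keys off. A minimal standalone sketch of querying that information with the OpenVINO Python API (the model path and device here are hypothetical placeholders, not values from this commit):

import openvino.runtime as ov

core = ov.Core()
# Hypothetical model path and device; Frigate takes these from the detector config.
compiled = core.compile_model(core.read_model("model.xml"), "CPU")

inp = compiled.input(0)
print(inp.shape)                    # e.g. [1, 3, 320, 320]
print(inp.element_type.to_dtype())  # numpy dtype, e.g. float32 for an f32 model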
@@ -80,6 +83,11 @@ class OvDetector(DetectionApi):
         ]
 
     def detect_raw(self, tensor_input):
+        tensor_input = preprocess(
+            tensor_input,
+            self.interpreter.inputs[0].shape,
+            self.interpreter.inputs[0].element_type.to_dtype(),
+        )
         infer_request = self.interpreter.create_infer_request()
         infer_request.infer([tensor_input])
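frigate.detectors.util itself is not part of this diff, so the following is only a sketch of what a dtype-aware preprocess() helper could look like, assuming detectors receive uint8 NHWC frames and that an f32 model expects normalized NCHW float input (the body below is an assumption, not the actual implementation):

import numpy as np

def preprocess(tensor_input, model_input_shape, model_input_dtype):
    """Convert a uint8 NHWC frame into whatever the loaded model expects."""
    if model_input_dtype == np.uint8:
        # Quantized/int8 models take the frame as-is.
        return tensor_input
    # f32 models: scale to 0..1 and, if the model is NCHW (shape like [1, 3, H, W]),
    # move the channel axis to the front.
    scaled = tensor_input.astype(np.float32) / 255.0
    if len(model_input_shape) == 4 and model_input_shape[1] == 3:
        scaled = np.transpose(scaled, (0, 3, 1, 2))
    return scaled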
@@ -132,28 +140,8 @@ class OvDetector(DetectionApi):
             )
             return detections
         elif self.ov_model_type == ModelTypeEnum.yolov8:
-            out_tensor = infer_request.get_output_tensor()
-            results = out_tensor.data[0]
-            output_data = np.transpose(results)
-            scores = np.max(output_data[:, 4:], axis=1)
-            if len(scores) == 0:
-                return np.zeros((20, 6), np.float32)
-            scores = np.expand_dims(scores, axis=1)
-            # add scores to the last column
-            dets = np.concatenate((output_data, scores), axis=1)
-            # filter out lines with scores below threshold
-            dets = dets[dets[:, -1] > 0.5, :]
-            # limit to top 20 scores, descending order
-            ordered = dets[dets[:, -1].argsort()[::-1]][:20]
-            detections = np.zeros((20, 6), np.float32)
-            for i, object_detected in enumerate(ordered):
-                detections[i] = self.process_yolo(
-                    np.argmax(object_detected[4:-1]),
-                    object_detected[-1],
-                    object_detected[:4],
-                )
-            return detections
+            out_tensor = infer_request.get_output_tensor().data
+            return yolov8_postprocess(self.interpreter.inputs[0].shape, out_tensor)
         elif self.ov_model_type == ModelTypeEnum.yolov5:
             out_tensor = infer_request.get_output_tensor()
             output_data = out_tensor.data[0]
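The yolov8_postprocess() helper is likewise defined outside this diff. The sketch below reconstructs its likely behavior from the inline code the commit removes, assuming an NCHW model input shape and Frigate's [class_id, score, ymin, xmin, ymax, xmax] normalized detection layout; treat it as an illustration rather than the real util implementation:

import numpy as np

def yolov8_postprocess(model_input_shape, output, score_threshold=0.5, top_k=20):
    # YOLOv8 output is (1, 4 + num_classes, num_anchors); transpose to rows of
    # [cx, cy, w, h, class scores...], as the removed inline code did.
    results = np.transpose(output[0])
    scores = np.max(results[:, 4:], axis=1)
    if len(scores) == 0:
        return np.zeros((top_k, 6), np.float32)

    # Keep rows above the score threshold, best first, at most top_k of them.
    keep = scores > score_threshold
    results, scores = results[keep], scores[keep]
    order = scores.argsort()[::-1][:top_k]

    model_h, model_w = model_input_shape[2], model_input_shape[3]
    detections = np.zeros((top_k, 6), np.float32)
    for i, idx in enumerate(order):
        cx, cy, w, h = results[idx, :4]
        detections[i] = [
            np.argmax(results[idx, 4:]),  # class id
            scores[idx],                  # confidence
            (cy - h / 2) / model_h,       # ymin, normalized to model input size
            (cx - w / 2) / model_w,       # xmin
            (cy + h / 2) / model_h,       # ymax
            (cx + w / 2) / model_w,       # xmax
        ]
    return detections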