Load model and submit for inference.
Successfully load the model and initialize the OpenVINO engine with either CPU or GPU as the device. Does not yet parse results for objects.
This commit is contained in:
parent 4b675692ec
commit cf03f62088
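For reference, the load-and-infer flow this commit wires into OvDetector, written out as a minimal standalone sketch (assumed OpenVINO 2022 Python API). The model path, device name, and dummy input below are placeholders, not values from this commit; Frigate passes the real path and device in from its detector config.

# Minimal sketch of the flow added to OvDetector; path, device, and input are hypothetical.
import numpy as np
import openvino.runtime as ov

core = ov.Core()
model = core.read_model("/openvino-model/model.xml")           # hypothetical model path
compiled = core.compile_model(model=model, device_name="CPU")  # or "GPU"

input_shape = list(compiled.input().shape)                     # e.g. [1, 300, 300, 3]
dummy_frame = np.zeros(input_shape, dtype=np.float32)          # assumes an FP32 input; match the model's precision

request = compiled.create_infer_request()
results = request.infer([dummy_frame])                         # raw output; parsing is left for a later commit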
@@ -53,6 +53,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
     echo 'deb http://deb.debian.org/debian testing main non-free' >/etc/apt/sources.list.d/debian-testing.list
     apt-get -qq update
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
+        intel-opencl-icd \
         mesa-va-drivers libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 radeontop intel-gpu-tools
     rm -f /etc/apt/sources.list.d/debian-testing.list
 fi
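The only functional change in this hunk is the added intel-opencl-icd package, which provides the OpenCL runtime that OpenVINO's GPU plugin loads on Intel iGPUs. A quick way to confirm the device is visible from inside the container (a suggested check, not part of this commit):

# Suggested sanity check, not part of this commit: list the devices the
# OpenVINO runtime can see. "GPU" should appear once intel-opencl-icd is
# installed and /dev/dri is passed through to the container.
import openvino.runtime as ov

print(ov.Core().available_devices)  # e.g. ['CPU', 'GPU']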
@@ -10,79 +10,48 @@ logger = logging.getLogger(__name__)

 class OvDetector(DetectionApi):
     def __init__(self, det_device=None, model_path=None, num_threads=1):
-        self.ovCore = ov.Core()
-        self.ovModel = self.ovCore.read_model(model_path)
-        self.interpreter = self.ovCore.compile_model(
-            model=self.ovModel, device_name=det_device
+        self.ov_core = ov.Core()
+        self.ov_model = self.ov_core.read_model(model_path)
+        self.interpreter = self.ov_core.compile_model(
+            model=self.ov_model, device_name=det_device
         )
-
-        self.interpreter.allocate_tensors()
-
-        self.tensor_input_details = self.interpreter.get_input_details()
-        self.tensor_output_details = self.interpreter.get_output_details()
+        logger.info(f"Model Input Shape: {self.interpreter.input().shape}")
+        logger.info(f"Model Output Shape: {self.interpreter.output().shape}")

     def detect_raw(self, tensor_input):
-        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
-        self.interpreter.invoke()
-
-        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
-        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
-        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
-        count = int(
-            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
-        )
+        tensor_transpose = np.reshape(tensor_input, self.interpreter.input().shape)
+
+        infer_request = self.interpreter.create_infer_request()
+        results = infer_request.infer([tensor_transpose])
+
+        # class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
+        # scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
+        # count = int(
+        #     self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
+        # # )
+
+        # class_ids = results[0, 0, :, 1]
+        # # class_ids = [0, 0, 1, 1]
+        # print(class_ids)
+
+        # scores = results
+        # print(scores)

         detections = np.zeros((20, 6), np.float32)
-
-        for i in range(count):
-            if scores[i] < 0.4 or i == 20:
-                break
-            detections[i] = [
-                class_ids[i],
-                float(scores[i]),
-                boxes[i][0],
-                boxes[i][1],
-                boxes[i][2],
-                boxes[i][3],
-            ]
-
-        return detections
-
-
-class GpuOpenVino(DetectionApi):
-    def __init__(self, det_device=None, model_path=None, num_threads=1):
-        self.interpreter = tflite.Interpreter(
-            model_path=model_path or "/cpu_model.tflite", num_threads=3
-        )
-
-        self.interpreter.allocate_tensors()
-
-        self.tensor_input_details = self.interpreter.get_input_details()
-        self.tensor_output_details = self.interpreter.get_output_details()
-
-    def detect_raw(self, tensor_input):
-        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
-        self.interpreter.invoke()
-
-        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
-        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
-        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
-        count = int(
-            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
-        )
-
-        detections = np.zeros((20, 6), np.float32)
-
-        for i in range(count):
-            if scores[i] < 0.4 or i == 20:
-                break
-            detections[i] = [
-                class_ids[i],
-                float(scores[i]),
-                boxes[i][0],
-                boxes[i][1],
-                boxes[i][2],
-                boxes[i][3],
-            ]
+        # i = 0
+        # for object_detected in results["detection_out"][0, 0, :]:
+        #     if object_detected[2] < 0.1 or i == 20:
+        #         break
+        #     detections.append(
+        #         [
+        #             object_detected[1],
+        #             float(object_detected[2]),
+        #             object_detected[3],
+        #             object_detected[4],
+        #             object_detected[5],
+        #             object_detected[6],
+        #         ]
+        #     )
+        #     i += 1

         return detections
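Per the commit message, detect_raw now only runs inference; the commented-out block hints at how the raw results might later be mapped into Frigate's 20x6 detections array. Below is a hedged sketch of that missing step, assuming an SSD-style output blob of shape [1, 1, N, 7] as the commented code suggests; neither the layout nor the box ordering is confirmed by this commit.

# Hypothetical post-processing sketched from the commented-out code above.
# Assumes an SSD-style "detection_out" blob of shape [1, 1, N, 7] whose rows
# are [image_id, label, conf, xmin, ymin, xmax, ymax]; not confirmed by this commit.
import numpy as np

def parse_ssd_results(detection_out, threshold=0.4, max_detections=20):
    detections = np.zeros((max_detections, 6), np.float32)
    for i, (_, label, conf, xmin, ymin, xmax, ymax) in enumerate(detection_out[0, 0, :]):
        if conf < threshold or i == max_detections:
            break
        # Existing detectors return rows of [class_id, score, ymin, xmin, ymax, xmax].
        detections[i] = [label, float(conf), ymin, xmin, ymax, xmax]
    return detections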
@@ -3,7 +3,8 @@ Flask == 2.2.*
 imutils == 0.5.*
 matplotlib == 3.6.*
 mypy == 0.942
-numpy == 1.22.*
+# numpy == 1.22.*
+numpy == 1.19.*
 opencv-python-headless == 4.5.5.*
 paho-mqtt == 1.6.*
 peewee == 3.15.*
@@ -18,3 +19,4 @@ scipy == 1.8.*
 setproctitle == 1.2.*
 ws4py == 0.5.*
 zeroconf == 0.39.4
+openvino == 2022.*
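The requirements change pins numpy back to 1.19.* alongside the new openvino == 2022.* dependency; the exact constraint driving the downgrade isn't stated in this commit. A quick suggested check that the two pins import together in the built image:

# Suggested check, not part of this commit: confirm the pinned packages
# resolve and import together in the built image.
import numpy
import openvino.runtime as ov

print("numpy", numpy.__version__)    # expected 1.19.x
print("openvino", ov.get_version())  # expected a 2022.x build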