complete edgetpu refactoring

YS 2022-01-02 18:44:20 +03:00
parent c7b3330265
commit f8118adf6b
2 changed files with 56 additions and 59 deletions

View File

@@ -960,7 +960,7 @@ class FrigateConfig(FrigateBaseModel):
             # check runtime config
             decoder_config = (
                 camera_config.ffmpeg
-                if "ffmpeg" in camera_config
+                if camera_config.ffmpeg is not None
                 else camera_config.gstreamer
             )
             assigned_roles = list(
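The membership test was dropped because camera_config is a config model object rather than a dict, so "ffmpeg" in camera_config is not a reliable way to ask whether the optional ffmpeg section was set; comparing the attribute against None is. A minimal sketch of the pattern, assuming pydantic-style optional fields (the FfmpegConfig/GstreamerConfig/CameraConfig stand-ins below are illustrative, not the project's real models):

from typing import Optional
from pydantic import BaseModel

class FfmpegConfig(BaseModel):
    input: str = ""

class GstreamerConfig(BaseModel):
    pipeline: str = ""

class CameraConfig(BaseModel):
    # Only one of the two decoder sections is expected to be configured.
    ffmpeg: Optional[FfmpegConfig] = None
    gstreamer: Optional[GstreamerConfig] = None

camera_config = CameraConfig(gstreamer=GstreamerConfig(pipeline="videotestsrc ! fakesink"))

# The attribute check sees which optional section was actually populated;
# a string membership test on the model instance would not.
decoder_config = (
    camera_config.ffmpeg
    if camera_config.ffmpeg is not None
    else camera_config.gstreamer
)
print(type(decoder_config).__name__)  # GstreamerConfig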

View File

@@ -9,10 +9,10 @@ from typing import Dict
 import numpy as np
-# import tflite_runtime.interpreter as tflite
-# from tflite_runtime.interpreter import load_delegate
+import tflite_runtime.interpreter as tflite
+from tflite_runtime.interpreter import load_delegate
 from frigate.util import EventsPerSecond
 from .object_detector import ObjectDetector
@@ -27,7 +27,7 @@ def object_detector_factory(detector_config: DetectorConfig, model_path: str):
     ):
         return None
     object_detector = LocalObjectDetector(
-        tf_device=detector_config.device,
+        tf_device=detector_config.type,
        model_path=model_path,
        num_threads=detector_config.num_threads,
    )
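The factory now derives the TensorFlow device from the detector's type field rather than a separate device field. A rough, self-contained sketch of that calling pattern, with DetectorConfig and LocalObjectDetector reduced to illustrative stand-ins (the guard on detector types is an assumption, not the project's exact condition):

from dataclasses import dataclass
from typing import Optional

@dataclass
class DetectorConfig:
    # Illustrative subset of the real config model.
    type: str = "cpu"        # e.g. "cpu" or "edgetpu"
    num_threads: int = 3

class LocalObjectDetector:
    # Stand-in exposing only the constructor signature the factory uses.
    def __init__(self, tf_device: Optional[str], model_path: Optional[str], num_threads: int):
        self.tf_device = tf_device
        self.model_path = model_path
        self.num_threads = num_threads

def object_detector_factory(detector_config: DetectorConfig, model_path: Optional[str]):
    # Types this backend does not handle are left to other detector plugins.
    if detector_config.type not in ("cpu", "edgetpu"):
        return None
    return LocalObjectDetector(
        tf_device=detector_config.type,  # was detector_config.device before this change
        model_path=model_path,
        num_threads=detector_config.num_threads,
    )

detector = object_detector_factory(DetectorConfig(type="edgetpu"), "/edgetpu_model.tflite")
print(detector.tf_device)  # edgetpu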
@@ -49,77 +49,74 @@ class LocalObjectDetector(ObjectDetector):
         edge_tpu_delegate = None
-        # if tf_device != "cpu":
-        #     try:
-        #         logger.info(f"Attempting to load TPU as {device_config['device']}")
-        #         edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
-        #         logger.info("TPU found")
-        #         self.interpreter = tflite.Interpreter(
-        #             model_path=model_path or "/edgetpu_model.tflite",
-        #             experimental_delegates=[edge_tpu_delegate],
-        #         )
-        #     except ValueError:
-        #         logger.error(
-        #             "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
-        #         )
-        #         raise
-        # else:
-        #     logger.warning(
-        #         "CPU detectors are not recommended and should only be used for testing or for trial purposes."
-        #     )
-        #     self.interpreter = tflite.Interpreter(
-        #         model_path=model_path or "/cpu_model.tflite", num_threads=num_threads
-        #     )
-        # self.interpreter.allocate_tensors()
-        # self.tensor_input_details = self.interpreter.get_input_details()
-        # self.tensor_output_details = self.interpreter.get_output_details()
+        if tf_device != "cpu":
+            try:
+                logger.info(f"Attempting to load TPU as {device_config['device']}")
+                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
+                logger.info("TPU found")
+                self.interpreter = tflite.Interpreter(
+                    model_path=model_path or "/edgetpu_model.tflite",
+                    experimental_delegates=[edge_tpu_delegate],
+                )
+            except ValueError:
+                logger.error(
+                    "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
+                )
+                raise
+        else:
+            logger.warning(
+                "CPU detectors are not recommended and should only be used for testing or for trial purposes."
+            )
+            self.interpreter = tflite.Interpreter(
+                model_path=model_path or "/cpu_model.tflite", num_threads=num_threads
+            )
+        self.interpreter.allocate_tensors()
+        self.tensor_input_details = self.interpreter.get_input_details()
+        self.tensor_output_details = self.interpreter.get_output_details()

     def detect(self, tensor_input, threshold=0.4):
-        # TODO: called from process_clip
+        # TODO: process_clip
         detections = []
-        assert False, "implement detect() for process_clip.py"
-        # raw_detections = self.detect_raw(tensor_input)
-        # for d in raw_detections:
-        #     if d[1] < threshold:
-        #         break
-        #     detections.append(
-        #         (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
-        #     )
-        # self.fps.update()
+        raw_detections = self.detect_raw(tensor_input)
+        for d in raw_detections:
+            if d[1] < threshold:
+                break
+            detections.append(
+                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
+            )
+        self.fps.update()
         return detections

     def detect_raw(self, tensor_input):
-        logger.error(">>>>>>>>>> detect raw")
         # Expand dimensions [height, width, 3] ince the model expects images to have shape [1, height, width, 3]
         tensor_input = np.expand_dims(tensor_input, axis=0)
-        # self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
-        # self.interpreter.invoke()
-        # boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
-        # class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
-        # scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
-        # count = int(
-        #     self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
-        # )
+        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
+        self.interpreter.invoke()
+        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
+        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
+        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
+        count = int(
+            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
+        )
         detections = np.zeros((20, 6), np.float32)
-        # for i in range(count):
-        #     if scores[i] < 0.4 or i == 20:
-        #         break
-        #     detections[i] = [
-        #         class_ids[i],
-        #         float(scores[i]),
-        #         boxes[i][0],
-        #         boxes[i][1],
-        #         boxes[i][2],
-        #         boxes[i][3],
-        #     ]
+        for i in range(count):
+            if scores[i] < 0.4 or i == 20:
+                break
+            detections[i] = [
+                class_ids[i],
+                float(scores[i]),
+                boxes[i][0],
+                boxes[i][1],
+                boxes[i][2],
+                boxes[i][3],
+            ]
         return detections
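For reference, the re-enabled detect_raw() returns a fixed-shape (20, 6) float32 array with one row per detection, laid out as [class_id, score, box coordinates], and zero rows padding out the unused slots; detect() then filters rows by score and maps class ids to labels. A small standalone sketch of consuming that layout (the normalized [ymin, xmin, ymax, xmax] box order of SSD-style TFLite models is assumed, and the label map is illustrative):

import numpy as np

LABELS = {0: "person", 1: "car"}  # illustrative label map

def decode_detections(raw: np.ndarray, threshold: float = 0.4):
    """Turn a (20, 6) [class_id, score, *box] array into labeled tuples."""
    results = []
    for row in raw:
        class_id, score = int(row[0]), float(row[1])
        if score < threshold:
            break  # rows come out in descending score order, so zero padding follows the real ones
        results.append((LABELS.get(class_id, str(class_id)), score, tuple(row[2:6])))
    return results

# Fake raw output: one confident detection, the rest zero padding.
raw = np.zeros((20, 6), np.float32)
raw[0] = [0, 0.87, 0.1, 0.2, 0.5, 0.6]
print(decode_detections(raw))  # one labeled detection above the 0.4 threshold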