mirror of https://github.com/blakeblackshear/frigate.git

commit dfb178d233 ("WIP")
parent 9f51347f9c
@@ -28,11 +28,14 @@ logger = logging.getLogger("EdgeTPUModel")
 class YOLOv5Tfl(DetectionApi):
     def __init__(self, det_device=None, model_config=None):
+        self.labels = load_labels(
+            model_config.labelmap_path
+        )  # Just to be able to print human readable labels
+
 
         """
         Creates an object for running a Yolov5 model on an EdgeTPU or a Desktop
         Inputs:
          - model_file: path to edgetpu-compiled tflite file
-         - names_file: yaml names file (yolov5 format)
          - conf_thresh: detection threshold
          - iou_thresh: NMS threshold
          - desktop: option to run model on a desktop
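For context, load_labels is the project's labelmap helper. A minimal sketch of a compatible loader, assuming a plain text labelmap with either bare labels or "index label" pairs per line (an assumption about the format, not the project's exact implementation):

    # Sketch of a labelmap loader (format handling is assumed; Frigate's
    # real load_labels lives in its util module).
    import re

    def load_labels_sketch(path):
        labels = {}
        with open(path, "r", encoding="utf-8") as f:
            for row_number, content in enumerate(f):
                pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
                if len(pair) == 2 and pair[0].isdigit():
                    labels[int(pair[0])] = pair[1].strip()  # "0 person" style
                else:
                    labels[row_number] = content.strip()    # bare label style
        return labels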
@@ -42,11 +45,11 @@ class YOLOv5Tfl(DetectionApi):
         """
 
         self.model_file = model_config.path
-        self.labels = load_labels(model_config.labelmap_path)
         self.desktop = True  # Should be cpu?
         self.conf_thresh = 0.25
         self.iou_thresh = 0.45
         self.filter_classes = None
+        # self.filter_classes = [15, 16]  # cat, dog
         self.agnostic_nms = False
         self.max_det = 1000
 
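Here conf_thresh drops boxes scoring below 0.25 before NMS, and iou_thresh suppresses overlapping boxes whose IoU exceeds 0.45. A minimal sketch of the IoU quantity those thresholds are compared against (illustrative, not this model's code):

    def box_iou(a, b):
        # a, b: [x1, y1, x2, y2] with x1 < x2 and y1 < y2
        inter_w = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        inter_h = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = inter_w * inter_h
        union = ((a[2] - a[0]) * (a[3] - a[1])
                 + (b[2] - b[0]) * (b[3] - b[1]) - inter)
        return inter / union if union > 0 else 0.0

    # box_iou([0, 0, 10, 10], [5, 0, 15, 10]) == 50 / 150, about 0.33,
    # which is below the 0.45 NMS threshold, so neither box suppresses
    # the other.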
@@ -121,10 +124,9 @@ class YOLOv5Tfl(DetectionApi):
         tensor_input = np.squeeze(tensor_input, axis=0)
         results = self.forward(tensor_input)
         det = results[0]
-        # logger.info(f"detections {len(det)}")
         detections = np.zeros((20, 6), np.float32)
         i = 0
-        for *xyxy, conf, cls in reversed(det):
+        for *xyxy, conf, cls in det:
             detections[i] = [
                 int(cls),
                 float(conf),
@@ -135,8 +137,6 @@ class YOLOv5Tfl(DetectionApi):
             ]
             i += 1
 
-            logger.info(f"{self.labels[int(cls)], int(cls), float(conf)}")
-
         return detections
 
     def forward(self, x: np.ndarray, with_nms=True) -> np.ndarray:
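The loop above fills a fixed (20, 6) buffer of [class, confidence, box] rows but never checks i against the buffer size, so with max_det = 1000 any frame yielding more than 20 post-NMS detections would raise an IndexError. A sketch of a bounds guard, not part of the commit:

    import numpy as np

    det = np.array([[0.1, 0.2, 0.6, 0.8, 0.91, 15.0]])  # toy row: box, conf, cls
    detections = np.zeros((20, 6), np.float32)
    for i, (*xyxy, conf, cls) in enumerate(det):
        if i >= detections.shape[0]:  # guard: buffer holds at most 20 rows
            break
        detections[i, 0] = int(cls)
        detections[i, 1] = float(conf)
        # columns 2..5 would carry the box coordinates (elided in the hunk)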
@@ -237,79 +237,6 @@ class Colors:
         return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4))
 
 
-def plot_one_box(
-    box, im, color=(128, 128, 128), txt_color=(255, 255, 255), label=None, line_width=3
-):
-    # Plots one xyxy box on image im with label
-    assert (
-        im.data.contiguous
-    ), "Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image."
-    lw = line_width or max(int(min(im.size) / 200), 2)  # line width
-
-    c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
-
-    cv2.rectangle(im, c1, c2, color, thickness=lw, lineType=cv2.LINE_AA)
-    if label:
-        tf = max(lw - 1, 1)  # font thickness
-        txt_width, txt_height = cv2.getTextSize(
-            label, 0, fontScale=lw / 3, thickness=tf
-        )[0]
-        c2 = c1[0] + txt_width, c1[1] - txt_height - 3
-        cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA)  # filled
-        cv2.putText(
-            im,
-            label,
-            (c1[0], c1[1] - 2),
-            0,
-            lw / 3,
-            txt_color,
-            thickness=tf,
-            lineType=cv2.LINE_AA,
-        )
-    return im
-
-
-def resize_and_pad(image, desired_size):
-    old_size = image.shape[:2]
-    ratio = float(desired_size / max(old_size))
-    new_size = tuple([int(x * ratio) for x in old_size])
-
-    # new_size should be in (width, height) format
-
-    image = cv2.resize(image, (new_size[1], new_size[0]))
-
-    delta_w = desired_size - new_size[1]
-    delta_h = desired_size - new_size[0]
-
-    pad = (delta_w, delta_h)
-
-    color = [100, 100, 100]
-    new_im = cv2.copyMakeBorder(
-        image, 0, delta_h, 0, delta_w, cv2.BORDER_CONSTANT, value=color
-    )
-
-    return new_im, pad
-
-
-def get_image_tensor(img, max_size, debug=False):
-    """
-    Reshapes an input image into a square with sides max_size
-    """
-    if type(img) is str:
-        img = cv2.imread(img)
-
-    resized, pad = resize_and_pad(img, max_size)
-    resized = resized.astype(np.float32)
-
-    if debug:
-        cv2.imwrite("intermediate.png", resized)
-
-    # Normalise!
-    resized /= 255.0
-
-    return img, resized, pad
-
-
 def xyxy2xywh(x):
     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
     y = np.copy(x)
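This hunk deletes the drawing and preprocessing helpers. For reference, a compact sketch of what the removed resize_and_pad/get_image_tensor path did: scale so the long side equals the target size, pad bottom and right with gray, then normalise to [0, 1] float32 (condensed restatement of the deleted code, not new behavior):

    import cv2
    import numpy as np

    def letterbox_square(image, desired_size):
        h, w = image.shape[:2]
        ratio = desired_size / max(h, w)
        new_h, new_w = int(h * ratio), int(w * ratio)
        resized = cv2.resize(image, (new_w, new_h))
        # pad bottom/right only, matching the deleted resize_and_pad
        resized = cv2.copyMakeBorder(
            resized, 0, desired_size - new_h, 0, desired_size - new_w,
            cv2.BORDER_CONSTANT, value=(100, 100, 100),
        )
        pad = (desired_size - new_w, desired_size - new_h)
        return resized.astype(np.float32) / 255.0, pad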
@@ -638,15 +638,15 @@ def mjpeg_feed(camera_name):
 @bp.route("/<camera_name>/latest.jpg")
 def latest_frame(camera_name):
     draw_options = {
-        "bounding_boxes": request.args.get("bbox", type=int),
+        "bounding_boxes": request.args.get("bbox", default=1, type=int),
         "timestamp": request.args.get("timestamp", type=int),
         "zones": request.args.get("zones", type=int),
         "mask": request.args.get("mask", type=int),
-        "motion_boxes": request.args.get("motion", type=int),
+        "motion_boxes": request.args.get("motion", default=1, type=int),
         "regions": request.args.get("regions", type=int),
     }
     resize_quality = request.args.get("quality", default=70, type=int)
-    save_output = request.args.get("save_output", default=0, type=int)
+    save_output = request.args.get("save_output", default=1, type=int)
 
     if camera_name in current_app.frigate_config.cameras:
         frame = current_app.detected_frames_processor.get_current_frame(
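With these defaults, latest.jpg now draws bounding-box and motion overlays unless the query string disables them, and saves a debug copy by default. A usage sketch (host, port, and the /api prefix are assumptions about the deployment, and "mycam" is a hypothetical camera name):

    import requests

    # explicitly turn the new defaults off and bump JPEG quality
    r = requests.get(
        "http://localhost:5000/api/mycam/latest.jpg",
        params={"bbox": 0, "motion": 0, "save_output": 0, "quality": 90},
    )
    with open("latest.jpg", "wb") as f:
        f.write(r.content)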
@@ -660,7 +660,10 @@ def latest_frame(camera_name):
 
         frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
         if save_output == 1:
-            cv2.imwrite(f'/tmp/{camera_name}.{time.time_ns() // 1000000}.jpeg', frame)
+            cv2.imwrite(
+                f"/tmp/debug/{camera_name}.yolov5.cat.t25.{time.time_ns() // 1000000}.jpeg",
+                frame,
+            )
 
         ret, jpg = cv2.imencode(
             ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), resize_quality]
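One caveat with the new debug path: cv2.imwrite does not raise when the target directory is missing, it just returns False, so /tmp/debug has to exist for these frames to land anywhere. An illustrative guard:

    import os

    # ensure the debug directory exists before the imwrite call above
    os.makedirs("/tmp/debug", exist_ok=True)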
@@ -45,11 +45,8 @@ class LocalObjectDetector(ObjectDetector):
         self.fps = EventsPerSecond()
         if labels is None:
             self.labels = {}
-        # else:
-        #     self.labels = load_labels(labels)
+        else:
+            self.labels = load_labels(labels)
 
-        if model_config.labelmap_path:
-            self.labels = load_labels(model_config.labelmap_path)
-
         if model_config:
             self.input_transform = tensor_transform(model_config.input_tensor)
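After this change, label resolution in LocalObjectDetector is driven solely by the labels argument; the model_config.labelmap_path override is gone. A sketch of the resulting branch (the helper name is hypothetical; load_labels is the project's loader):

    def resolve_labels(labels_path):
        # mirrors the constructor's new branch (illustrative)
        if labels_path is None:
            return {}  # no labels: detections fall back to numeric classes
        return load_labels(labels_path)  # labelmap_path no longer consulted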
@@ -232,18 +229,23 @@ class RemoteObjectDetector:
         self.np_shm[:] = tensor_input[:]
         self.event.clear()
         self.detection_queue.put(self.name)
-        result = self.event.wait(timeout=10.0)
+        result = self.event.wait(timeout=100.0)
 
         # if it timed out
         if result is None:
             return detections
+        # file_object = open("/tmp/detections.yolov5.txt", "a")
 
         for d in self.out_np_shm:
-            if d[1] < threshold:
+            # if d[1] < threshold:
+            if d[1] < 0.25:
                 break
             detections.append(
                 (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
             )
+            # file_object.write(f"{self.labels[int(d[0])]},{float(d[1])}\n")
+        # file_object.close()
+        logger.info(f"end detections")
         self.fps.update()
         return detections
 
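For reference, the detect path above is a shared-memory handshake: copy the tensor into np_shm, enqueue this detector's name, then block on an Event until the detector process writes results into out_np_shm. Note that Event.wait returns False on timeout, never None, so the "if result is None" check above can never fire; the sketch below (simplified names, not the full implementation) checks truthiness instead:

    # Generic sketch of the handshake (parameters stand in for the
    # instance attributes used above).
    def detect_via_shm(np_shm, out_np_shm, event, detection_queue, name, tensor_input):
        np_shm[:] = tensor_input[:]        # publish the input frame
        event.clear()
        detection_queue.put(name)          # wake the detector process
        if not event.wait(timeout=100.0):  # detector sets the event when done
            return []                      # timed out
        detections = []
        for d in out_np_shm:               # rows: [class, score, box...]
            if d[1] < 0.25:                # rows are score-ordered; stop early
                break
            detections.append((int(d[0]), float(d[1]), tuple(d[2:6])))
        return detections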