From 410be5adaa9a6a600e98ec3397d730f74a64e519 Mon Sep 17 00:00:00 2001
From: Michael Wei
Date: Sat, 28 Nov 2020 09:27:19 +0000
Subject: [PATCH] tensor input dump and UI to classify

---
 README.md          |   9 ++++
 detect_objects.py  | 116 +++++++++++++++++++++++++++++++++++++++++++--
 frigate/edgetpu.py |  12 ++++-
 frigate/video.py   |   4 +-
 4 files changed, 133 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index af6aa219a..75cb04534 100644
--- a/README.md
+++ b/README.md
@@ -238,6 +238,15 @@ objects:
     # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below)
     threshold: 0.85
 
+# Configuration for saving detector inputs as training data.
+# Set to true to save every input sent to the TensorFlow detector. Note that every input is saved, so this will generate a lot of data.
+saveTensorInputs: false
+# Path to save tensor inputs to.
+saveTensorPath: /clips/tensorInputs
+# Path the categorizer UI will save categorized inputs to.
+# The categorizer UI can be accessed at http://<frigate-host>/tensorInputUi
+saveTensorCategorizedPath: /clips/tensorCategorized
+
 # Required: configuration section for cameras
 cameras:
   # Required: name of the camera
diff --git a/detect_objects.py b/detect_objects.py
index f239069b6..fd0e3e86a 100644
--- a/detect_objects.py
+++ b/detect_objects.py
@@ -15,8 +15,9 @@ import multiprocessing as mp
 import subprocess as sp
 import numpy as np
 import logging
-from flask import Flask, Response, make_response, jsonify, request
+from flask import Flask, Response, make_response, jsonify, request, send_from_directory
 import paho.mqtt.client as mqtt
+import shutil
 
 from frigate.video import capture_camera, track_camera, get_ffmpeg_input, get_frame_shape, CameraCapture, start_or_restart_ffmpeg
 from frigate.object_processing import TrackedObjectProcessor
@@ -121,8 +122,9 @@ class FrigateWatchdog(threading.Thread):
                     camera_process['process_fps'].value = 0.0
                     camera_process['detection_fps'].value = 0.0
                     camera_process['read_start'].value = 0.0
-                    process = mp.Process(target=track_camera, args=(name, self.config,
-                        self.detection_queue, self.out_events[name], self.tracked_objects_queue, camera_process, self.stop_event))
+                    process = mp.Process(target=track_camera, args=(name, self.config[name],
+                        self.detection_queue, self.out_events[name], self.tracked_objects_queue, camera_process,
+                        self.stop_event, CONFIG))
                     process.daemon = True
                     camera_process['process'] = process
                     process.start()
@@ -265,7 +267,7 @@ def main():
         camera_process_info[name]['capture_process'] = capture_process
 
         camera_process = mp.Process(target=track_camera, args=(name, config,
-            detection_queue, out_events[name], tracked_objects_queue, camera_process_info[name], stop_event))
+            detection_queue, out_events[name], tracked_objects_queue, camera_process_info[name], stop_event, CONFIG))
         camera_process.daemon = True
         camera_process_info[name]['process'] = camera_process
@@ -417,7 +419,7 @@ def main():
                 return response
             else:
                 return "Camera named {} not found".format(camera_name), 404
-    
+
     def imagestream(camera_name, fps, height):
         while True:
             # max out at specified FPS
@@ -433,6 +435,110 @@ def main():
             yield (b'--frame\r\n'
                    b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
 
+    @app.route('/tensorInput/<path>')
+    def send_tensor_image(path):
+        return send_from_directory(CONFIG["saveTensorPath"], path)
+
+    @app.route('/tensorInputClassify/<path>/<category>')
+    def classify_tensor_image(path, category):
+        if category == "delete":
+            os.unlink(os.path.join(CONFIG["saveTensorPath"], path))
+            return "deleted"
+        else:
+            category_path = os.path.join(CONFIG["saveTensorCategorizedPath"], category)
+            os.makedirs(category_path, exist_ok=True)
+            shutil.move(os.path.join(CONFIG["saveTensorPath"], path), os.path.join(category_path, path))
+            return "ok"
+
+    @app.route('/tensorInputNext')
+    def get_next_tensor_image():
+        file = None
+        for root, dirs, files in os.walk(CONFIG["saveTensorPath"]):
+            for name in files:
+                file = name
+                break
+        return "No more inputs" if file is None else file
+
+    @app.route('/tensorInputUi')
+    def get_tensor_ui():
+        return """
+<html>
+<head>
+<title>Simple classification UI</title>
+<script>
+// Key-to-category map; adjust to the labels you want to collect.
+// "delete" is special-cased server-side and removes the input instead.
+var categories = {"p": "person", "c": "car", "d": "dog", "x": "delete"};
+var current = null;
+
+// Fetch the name of the next unclassified input and display it.
+function loadNext() {
+  fetch("/tensorInputNext").then(function(r) { return r.text(); }).then(function(name) {
+    if (name === "No more inputs") {
+      current = null;
+      document.getElementById("status").innerText = name;
+      document.getElementById("input").src = "";
+    } else {
+      current = name;
+      document.getElementById("status").innerText = name;
+      document.getElementById("input").src = "/tensorInput/" + name;
+    }
+  });
+}
+
+// Pressing a mapped key files the current input under that category,
+// then advances to the next input.
+document.addEventListener("keydown", function(e) {
+  var category = categories[e.key];
+  if (current === null || category === undefined) return;
+  fetch("/tensorInputClassify/" + current + "/" + category).then(loadNext);
+});
+
+// Build the key legend from the map above, then load the first input.
+window.onload = function() {
+  var table = document.getElementById("keys");
+  for (var key in categories) {
+    var row = table.insertRow();
+    row.insertCell().innerText = key;
+    row.insertCell().innerText = categories[key];
+  }
+  loadNext();
+};
+</script>
+</head>
+<body>
+<div id="status"></div>
+<img id="input" width="300" height="300" />
+<table id="keys">
+<tr><th>Key</th><th>Category</th></tr>
+</table>
+</body>
+</html>
+"""
+
     app.run(host='0.0.0.0', port=WEB_PORT, debug=False)
 
     object_processor.join()
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
index 5398b91e5..4c409b216 100644
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -10,6 +10,7 @@ import numpy as np
 import tflite_runtime.interpreter as tflite
 from tflite_runtime.interpreter import load_delegate
 from frigate.util import EventsPerSecond, listen, SharedMemoryFrameManager
+import cv2
 
 def load_labels(path, encoding='utf-8'):
   """Loads labels from file (with or without index numbers).
@@ -159,7 +160,7 @@ class EdgeTPUProcess():
         self.detect_process.start()
 
 class RemoteObjectDetector():
-    def __init__(self, name, labels, detection_queue, event):
+    def __init__(self, name, labels, detection_queue, event, config):
         self.labels = load_labels(labels)
         self.name = name
         self.fps = EventsPerSecond()
@@ -169,6 +170,7 @@ class RemoteObjectDetector():
         self.np_shm = np.ndarray((1,300,300,3), dtype=np.uint8, buffer=self.shm.buf)
         self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False)
         self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf)
+        self.config = config
 
     def detect(self, tensor_input, threshold=.4):
         detections = []
@@ -177,6 +179,14 @@ class RemoteObjectDetector():
         self.np_shm[:] = tensor_input[:]
         self.event.clear()
         self.detection_queue.put(self.name)
+
+        if self.config["saveTensorInputs"]:
+            root_path = self.config["saveTensorPath"]
+            os.makedirs(root_path, exist_ok=True)
+
+            # tensor_input is RGB; convert to BGR so cv2.imwrite stores true colors
+            file_path = os.path.join(root_path, f"{self.name}.{datetime.datetime.now().timestamp()}.jpg")
+            cv2.imwrite(file_path, cv2.cvtColor(tensor_input[0], cv2.COLOR_RGB2BGR))
+
         result = self.event.wait(timeout=10.0)
 
         # if it timed out
diff --git a/frigate/video.py b/frigate/video.py
index c60714ca1..22109d643 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -241,7 +241,7 @@ def capture_camera(name, config, process_info, stop_event):
     camera_watchdog.start()
     camera_watchdog.join()
 
-def track_camera(name, config, detection_queue, result_connection, detected_objects_queue, process_info, stop_event):
+def track_camera(name, config, detection_queue, result_connection, detected_objects_queue, process_info, stop_event, global_config):
     listen()
 
     frame_queue = process_info['frame_queue']
@@ -275,7 +275,7 @@ def track_camera(name, config, detection_queue, result_connection, detected_obje
     mask[:] = 255
 
     motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
-    object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection)
+    object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, global_config)
 
     object_tracker = ObjectTracker(10)
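
A rough usage sketch for the new endpoints (Python, standard library only).
The base URL http://frigate:5000 and the "person" category are assumptions
here; substitute your own Frigate address and whatever categories you want to
sort inputs into. The browser UI at /tensorInputUi drives these same three
endpoints.

    import urllib.request

    base = "http://frigate:5000"  # assumed address of the Frigate web server

    # Ask for the filename of the next unclassified tensor input.
    name = urllib.request.urlopen(f"{base}/tensorInputNext").read().decode()

    if name != "No more inputs":
        # Fetch the 300x300 JPEG exactly as it was fed to the detector.
        jpg = urllib.request.urlopen(f"{base}/tensorInput/{name}").read()

        # File it under <saveTensorCategorizedPath>/person/ ...
        urllib.request.urlopen(f"{base}/tensorInputClassify/{name}/person")

        # ... or pass "delete" as the category to remove it instead.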