Remove yolov8 from dev

This commit is contained in:
Nicolas Mowen 2024-03-27 15:11:21 -06:00
parent 94c8edaff7
commit 13e4cbb31b
5 changed files with 4 additions and 160 deletions

View File

@ -105,58 +105,6 @@ detectors:
device: pci
```
### Yolov8 On Coral
It is possible to use the [ultralytics yolov8](https://github.com/ultralytics/ultralytics) pretrained models with the Google Coral processors.
#### Setup
You need to download yolov8 model files suitable for the EdgeTPU. Frigate can do this automatically with the `DOWNLOAD_YOLOV8={0 | 1}` environment variable either from the command line
```bash
$ docker run ... -e DOWNLOAD_YOLOV8=1 \
...
```
or when using docker compose:
```yaml
services:
frigate:
---
environment:
DOWNLOAD_YOLOV8: "1"
```
When this variable is set, Frigate will fetch [yolov8.small.models.tar.gz](https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz) at startup and extract it into the `/config/model_cache/yolov8/` directory.
The following files suitable for the EdgeTPU detector will be available under `/config/model_cache/yolov8/`:
- `yolov8[ns]_320x320_edgetpu.tflite` -- nano (n) and small (s) sized models that have been trained using the coco dataset (90 classes)
- `yolov8[ns]-oiv7_320x320_edgetpu.tflite` -- model files that have been trained using the google open images v7 dataset (601 classes)
- `labels.txt` and `labels-frigate.txt` -- full and aggregated labels for the coco dataset models
- `labels-oiv7.txt` and `labels-oiv7-frigate.txt` -- labels for the oiv7 dataset models
The aggregated label files contain renamed labels, leaving only the `person`, `vehicle`, `animal` and `bird` classes. The oiv7-trained models contain 601 classes and so are difficult to configure manually — using the aggregated labels is recommended.
Larger models (of `m` and `l` size and also at `640x640` resolution) can be found at https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ but have to be installed manually.
The oiv7 models have been trained using the larger Google Open Images v7 dataset. They also contain many more detection classes (over 600), so using the aggregated label files is recommended. The large number of classes leads to a lower baseline for detection probability values and to higher resource consumption (the models are slower to evaluate).
#### Configuration
```yaml
model:
labelmap_path: /config/model_cache/yolov8/labels.txt
model_type: yolov8
detectors:
coral:
type: edgetpu
device: usb
model:
path: /config/model_cache/yolov8/yolov8n_320x320_edgetpu.tflite
```
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.

View File

@ -6,7 +6,6 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import yolov8_postprocess
try:
from tflite_runtime.interpreter import Interpreter, load_delegate
@ -58,26 +57,9 @@ class EdgeTpuTfl(DetectionApi):
self.model_type = detector_config.model.model_type
def detect_raw(self, tensor_input):
if self.model_type == "yolov8":
scale, zero_point = self.tensor_input_details[0]["quantization"]
tensor_input = (
(tensor_input - scale * zero_point * 255) * (1.0 / (scale * 255))
).astype(self.tensor_input_details[0]["dtype"])
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
if self.model_type == "yolov8":
scale, zero_point = self.tensor_output_details[0]["quantization"]
tensor_output = self.interpreter.get_tensor(
self.tensor_output_details[0]["index"]
)
tensor_output = (tensor_output.astype(np.float32) - zero_point) * scale
model_input_shape = self.tensor_input_details[0]["shape"]
tensor_output[:, [0, 2]] *= model_input_shape[2]
tensor_output[:, [1, 3]] *= model_input_shape[1]
return yolov8_postprocess(model_input_shape, tensor_output)
boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]

View File

@ -6,7 +6,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess, yolov8_postprocess
from frigate.detectors.util import preprocess
logger = logging.getLogger(__name__)
@ -31,23 +31,6 @@ class ONNXDetector(DetectionApi):
)
raise
assert (
detector_config.model.model_type == "yolov8"
), "ONNX: detector_config.model.model_type: only yolov8 supported"
assert (
detector_config.model.input_tensor == "nhwc"
), "ONNX: detector_config.model.input_tensor: only nhwc supported"
if detector_config.model.input_pixel_format != "rgb":
logger.warn(
"ONNX: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
)
assert detector_config.model.path is not None, (
"ONNX: No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
+ " and "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
)
path = detector_config.model.path
logger.info(f"ONNX: loading {detector_config.model.path}")
@ -62,4 +45,4 @@ class ONNXDetector(DetectionApi):
tensor_output = self.model.run(None, {model_input_name: tensor_input})[0]
return yolov8_postprocess(model_input_shape, tensor_output)
raise Exception("No models are currently supported via onnx. See the docs for more info.")

View File

@ -1,5 +1,4 @@
import ctypes
import glob
import logging
import os
import subprocess
@ -11,7 +10,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess, yolov8_postprocess
from frigate.detectors.util import preprocess
logger = logging.getLogger(__name__)
@ -75,27 +74,6 @@ class ROCmDetector(DetectionApi):
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
raise
if detector_config.conserve_cpu:
logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
assert (
detector_config.model.model_type == "yolov8"
), "AMD/ROCm: detector_config.model.model_type: only yolov8 supported"
assert (
detector_config.model.input_tensor == "nhwc"
), "AMD/ROCm: detector_config.model.input_tensor: only nhwc supported"
if detector_config.model.input_pixel_format != "rgb":
logger.warn(
"AMD/ROCm: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
)
assert detector_config.model.path is not None, (
"No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
+ " and "
+ ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
)
path = detector_config.model.path
mxr_path = os.path.splitext(path)[0] + ".mxr"
if path.endswith(".mxr"):
@ -140,4 +118,4 @@ class ROCmDetector(DetectionApi):
addr, shape=detector_result.get_shape().lens()
)
return yolov8_postprocess(model_input_shape, tensor_output)
raise Exception("No models are currently supported for rocm. See the docs for more info.")

View File

@ -34,50 +34,3 @@ def preprocess(tensor_input, model_input_shape, model_input_element_type):
None,
swapRB=False,
)
def yolov8_postprocess(
model_input_shape,
tensor_output,
box_count=20,
score_threshold=0.5,
nms_threshold=0.5,
):
model_box_count = tensor_output.shape[2]
probs = tensor_output[0, 4:, :]
all_ids = np.argmax(probs, axis=0)
all_confidences = probs.T[np.arange(model_box_count), all_ids]
all_boxes = tensor_output[0, 0:4, :].T
mask = all_confidences > score_threshold
class_ids = all_ids[mask]
confidences = all_confidences[mask]
cx, cy, w, h = all_boxes[mask].T
if model_input_shape[3] == 3:
scale_y, scale_x = 1 / model_input_shape[1], 1 / model_input_shape[2]
else:
scale_y, scale_x = 1 / model_input_shape[2], 1 / model_input_shape[3]
detections = np.stack(
(
class_ids,
confidences,
scale_y * (cy - h / 2),
scale_x * (cx - w / 2),
scale_y * (cy + h / 2),
scale_x * (cx + w / 2),
),
axis=1,
)
if detections.shape[0] > box_count:
# if too many detections, do nms filtering to suppress overlapping boxes
boxes = np.stack((cx - w / 2, cy - h / 2, w, h), axis=1)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
detections = detections[indexes]
# if still too many, trim the rest by confidence
if detections.shape[0] > box_count:
detections = detections[
np.argpartition(detections[:, 1], -box_count)[-box_count:]
]
detections = detections.copy()
detections.resize((box_count, 6))
return detections