update model selection
Commit a7cbd3fd4f (parent a0ee5b10a7) in blakeblackshear/frigate.
@@ -316,12 +316,12 @@ detectors: # required
     type: rknn # required
     # core mask for npu
     core_mask: 0
-    # yolov8 model in rknn format to use; allowed calues: n, s, m, l, x
-    yolov8_rknn_model: n

 model: # required
-  # path to .rknn model file
-  path:
+  # name of yolov8 model or path to your own .rknn model file
+  # possible names of yolov8 models are: default-yolov8n,
+  # default-yolov8s, default-yolov8m, default-yolov8l, default-yolov8x
+  path: default-yolov8n
   # width and height of detection frames
   width: 320
   height: 320
@@ -338,7 +338,6 @@ Explanation for rknn specific options:
 - `core_mask: 0b001` use only core0.
 - `core_mask: 0b011` use core0 and core1.
 - `core_mask: 0b110` use core1 and core2. **This does not** work, since core0 is disabled.
-- **yolov8_rknn_model** see section below.

 ### Choosing a model

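The `core_mask` is a plain bitmask where bit *i* enables NPU core *i*. As a side note (not part of the commit), here is a minimal Python sketch of how the values above are composed; the helper name is made up for illustration only.

```python
# Hypothetical helper, only to illustrate how the core_mask bits compose.
CORE_0 = 0b001
CORE_1 = 0b010
CORE_2 = 0b100

def core_mask(*cores: int) -> int:
    """OR the selected core bits into a single mask value."""
    mask = 0
    for core in cores:
        mask |= core
    return mask

print(bin(core_mask(CORE_0)))          # 0b1   -> core0 only
print(bin(core_mask(CORE_0, CORE_1)))  # 0b11  -> core0 and core1
print(bin(core_mask(CORE_1, CORE_2)))  # 0b110 -> core1 and core2 (not usable, see note above)
```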
@@ -363,23 +362,12 @@ $ cat /sys/kernel/debug/rknpu/load

 :::

-- By default the rknn detector uses the yolov8**n** model (`yolov8_rknn_model: n`). This model comes with the image, so no further steps than those mentioned above are necessary.
-- If you want to use are more precise model, you can set `yolov8_rknn_model:` to `s`, `m`, `l` or `x`. But additional steps are required:
-  1. Mount the directory `/model/download/` to your system using one of the below methods. Of course, you can change to destination folder.
-     - If you start frigate with docker run, append this flag to your command: `-v /model/download:./data/rknn-models`
-     - If you use docker compose, append this to your `volumes` block: `/model/download:./data/rknn-models`
-  2. Download the rknn model.
-     - If your server has an internet connection, it will download the model.
-     - Otherwise, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases/tag/latest) on another device and place it in the `rknn-models` folder that you mounted to your system.
-  3. Check the inference speeds in the frigate WebUI under System. If you use bigger models the inference speed will increase. If it gets too high, consider enabling more NPU cores using `core_mask` option.
-- Finally, you can also provide your own model. Note, that you will need to convert your model to the rknn format using `rknn-toolkit2` on a x86 machine. Afterwards, you can mount a directory to the image (docker run flag: `-v /model/custom:./data/my-rknn-models` or docker compose: add `/model/custom:./data/my-rknn-models` to the `volumes` block) and place your model file in that directory. Then you need to pass the path to your model using the `path` option of your `model` block like this:
+- By default the rknn detector uses the yolov8n model (`model: path: default-yolov8n`). This model comes with the image, so no further steps than those mentioned above are necessary.
+- If you want to use a more precise model, you can pass `default-yolov8s`, `default-yolov8m`, `default-yolov8l` or `default-yolov8x` as the `model: path:` option.
+  - If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`.
+  - If your server has no internet connection, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases/tag/latest) using another device and place it in the `config/model_cache/rknn` directory on your system.
+- Finally, you can also provide your own model. Note that only yolov8 models are currently supported. Moreover, you will need to convert your model to the rknn format using `rknn-toolkit2` on an x86 machine. Afterwards, you can place your `.rknn` model file in the `config/model_cache/rknn` directory on your system. Then you need to pass the path to your model using the `path` option of your `model` block like this:

 ```yaml
 model:
-  path: /model/custom/my-rknn-model.rknn
+  path: /config/model_cache/rknn/my-rknn-model.rknn
 ```
-
-:::caution
-
-The `path` option of the `model` block will overwrite the `yolov8_rknn_model` option of the `detectors` block. So if you want to use one of the provided yolov8 models, make sure to not specify the `path` option.
-
-:::
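The new docs mention converting a custom model with `rknn-toolkit2` only in passing. Below is a rough sketch of the usual export flow on an x86 machine, not taken from this commit; the ONNX file name, output name, and `rk3588` target platform are placeholder assumptions, so check the rknn-toolkit2 documentation for the options that match your board and model.

```python
# Rough sketch of converting a yolov8 ONNX export to .rknn with rknn-toolkit2.
# "yolov8s.onnx", the output file name, and the rk3588 target are placeholders.
from rknn.api import RKNN

rknn = RKNN()
rknn.config(target_platform="rk3588")      # SoC you will run Frigate on
rknn.load_onnx(model="yolov8s.onnx")       # ONNX model exported beforehand
rknn.build(do_quantization=False)          # optionally quantize with a calibration set
rknn.export_rknn("yolov8s-320x320.rknn")   # copy this file into config/model_cache/rknn
rknn.release()
```

The exported file can then be referenced via the `model: path:` option as shown in the YAML snippet above.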
@@ -3,8 +3,6 @@ import os.path
 import urllib.request
 from typing import Literal

-import cv2
-import cv2.dnn
 import numpy as np

 try:
@@ -24,43 +22,46 @@ logger = logging.getLogger(__name__)

 DETECTOR_KEY = "rknn"

+yolov8_rknn_models = {
+    "default-yolov8n": "n",
+    "default-yolov8s": "s",
+    "default-yolov8m": "m",
+    "default-yolov8l": "l",
+    "default-yolov8x": "x",
+}
+
+
 class RknnDetectorConfig(BaseDetectorConfig):
     type: Literal[DETECTOR_KEY]
-    yolov8_rknn_model: Literal["n", "s", "m", "l", "x"] = "n"
     core_mask: int = Field(default=0, ge=0, le=7, title="Core mask for NPU.")


 class Rknn(DetectionApi):
     type_key = DETECTOR_KEY

     def __init__(self, config: RknnDetectorConfig):
-        self.model_path = config.model.path or "/models/yolov8n-320x320.rknn"
+        self.model_path = config.model.path or "default-yolov8n"
+        self.core_mask = config.core_mask
+        self.height = config.model.height
+        self.width = config.model.width

-        if config.model.path != None:
-            self.model_path = config.model.path
-        else:
-            if config.yolov8_rknn_model == "n":
+        if self.model_path in yolov8_rknn_models:
+            if self.model_path == "default-yolov8n":
                 self.model_path = "/models/yolov8n-320x320.rknn"
             else:
-                # check if user mounted /models/download/
-                if not os.path.isdir("/models/download/"):
-                    logger.error(
-                        'Make sure to mount the directory "/models/download/" to your system. Otherwise the file will be downloaded at every restart.'
-                    )
-                    raise Exception(
-                        'Make sure to mount the directory "/models/download/" to your system. Otherwise the file will be downloaded at every restart.'
-                    )
-
-                self.model_path = "/models/download/yolov8{}-320x320.rknn".format(
-                    config.yolov8_rknn_model
-                )
-                if os.path.isfile(self.model_path) == False:
-                    logger.info(
-                        "Downloading yolov8{} model.".format(config.yolov8_rknn_model)
-                    )
+                model_suffix = yolov8_rknn_models[self.model_path]
+                self.model_path = (
+                    "/config/model_cache/rknn/yolov8{}-320x320.rknn".format(
+                        model_suffix
+                    )
+                )
+
+                os.makedirs("/config/model_cache/rknn", exist_ok=True)
+                if not os.path.isfile(self.model_path):
+                    logger.info("Downloading yolov8{} model.".format(model_suffix))
                     urllib.request.urlretrieve(
                         "https://github.com/MarcA711/rknn-models/releases/download/latest/yolov8{}-320x320.rknn".format(
-                            config.yolov8_rknn_model
+                            model_suffix
                         ),
                         self.model_path,
                     )
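For orientation, here is a standalone, illustrative-only mirror of the model-path resolution this hunk introduces; the helper function is hypothetical (the real logic lives in `Rknn.__init__`) and only shows what the three kinds of `model.path` values resolve to.

```python
from typing import Optional

# Illustrative-only mirror of the resolution in Rknn.__init__; not part of the commit.
yolov8_rknn_models = {"default-yolov8n": "n", "default-yolov8s": "s", "default-yolov8m": "m"}

def resolve_model_path(path: Optional[str]) -> str:
    path = path or "default-yolov8n"
    if path in yolov8_rknn_models:
        if path == "default-yolov8n":
            return "/models/yolov8n-320x320.rknn"  # bundled with the image
        suffix = yolov8_rknn_models[path]
        # larger default models are cached here and downloaded on demand
        return "/config/model_cache/rknn/yolov8{}-320x320.rknn".format(suffix)
    return path  # a user-supplied .rknn file is used as-is

print(resolve_model_path(None))                # /models/yolov8n-320x320.rknn
print(resolve_model_path("default-yolov8s"))   # /config/model_cache/rknn/yolov8s-320x320.rknn
print(resolve_model_path("/config/model_cache/rknn/my-rknn-model.rknn"))
```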
@@ -89,10 +90,6 @@ class Rknn(DetectionApi):
                 'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
             )

-        self.height = config.model.height
-        self.width = config.model.width
-        self.core_mask = config.core_mask
-
         from rknnlite.api import RKNNLite

         self.rknn = RKNNLite(verbose=False)
@@ -117,19 +114,25 @@ class Rknn(DetectionApi):
         detections: array with shape (20, 6) with 20 rows of (class, confidence, y_min, x_min, y_max, x_max)
         """

-        results = np.transpose(results[0, :, :, 0]) # array shape (2100, 84)
+        results = np.transpose(results[0, :, :, 0])  # array shape (2100, 84)
         scores = np.max(
             results[:, 4:], axis=1
         )  # array shape (2100,); max confidence of each row
-        results = [np.where(scores > 0.4)]
-
-        detections = np.zeros((20, 6), np.float32)
+
+        # remove rows with scores < 0.4
+        filtered_arg = np.argwhere(scores > 0.4)
+        results = results[filtered_arg[:, 0]]
+        scores = scores[filtered_arg[:, 0]]

         num_detections = len(scores)

+        if num_detections == 0:
+            return np.zeros((20, 6), np.float32)
+
         if num_detections > 20:
-            max_ind = np.argpartition(scores, -20)[:-20]
-            results = results[max_ind]
-            scores = scores[max_ind]
+            top_arg = np.argpartition(scores, -20)[-20:]
+            results = results[top_arg]
+            scores = scores[top_arg]
             num_detections = 20

         classes = np.argmax(results[:, 4:], axis=1)
@@ -145,6 +148,7 @@ class Rknn(DetectionApi):
             )
         )

+        detections = np.zeros((20, 6), np.float32)
         detections[:num_detections, 0] = classes
         detections[:num_detections, 1] = scores
         detections[:num_detections, 2:] = boxes

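To see what the reworked post-processing does end to end, here is a small self-contained NumPy sketch of the same filter-then-top-20 pattern; the shapes and the 0.4 threshold mirror the hunks above, but the random inputs are made-up placeholders rather than real model output.

```python
import numpy as np

# Made-up stand-ins for the transposed model output and its per-row confidences.
rng = np.random.default_rng(0)
results = rng.random((2100, 84)).astype(np.float32)
scores = rng.random(2100).astype(np.float32)

# Keep only rows with confidence above the 0.4 threshold.
filtered_arg = np.argwhere(scores > 0.4)
results = results[filtered_arg[:, 0]]
scores = scores[filtered_arg[:, 0]]

# If more than 20 rows survive, keep the 20 highest-scoring ones.
if len(scores) > 20:
    top_arg = np.argpartition(scores, -20)[-20:]
    results = results[top_arg]
    scores = scores[top_arg]

# Zero-padded (20, 6) output, as in the final hunk; class and box columns omitted here.
detections = np.zeros((20, 6), np.float32)
detections[: len(scores), 1] = scores

print(results.shape, detections.shape, scores.min())
```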