apply changes from review and fix post process bug

MarcA711 2023-11-18 11:47:49 +00:00
parent e0be59b3be
commit 435ca33376
4 changed files with 38 additions and 37 deletions

View File

@@ -9,18 +9,6 @@ COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
 RUN sed -i "/https:\/\//d" /requirements-wheels.txt
 RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
-FROM wget as rk-downloads
-RUN wget -qO ffmpeg https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/latest/ffmpeg
-RUN wget -qO ffprobe https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/latest/ffprobe
-RUN wget -qO librknnrt_rk356x.so https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so
-RUN wget -qO librknnrt_rk3588.so https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so
-RUN wget -qO yolov8n-320x320-rk3562.rknn https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn
-RUN wget -qO yolov8n-320x320-rk3566.rknn https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn
-RUN wget -qO yolov8n-320x320-rk3568.rknn https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn
-RUN wget -qO yolov8n-320x320-rk3588.rknn https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn
 FROM deps AS rk-deps
 ARG TARGETARCH
@@ -30,17 +18,15 @@ RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
-COPY --from=rk-downloads /rootfs/librknnrt_rk356x.so /usr/lib/
-COPY --from=rk-downloads /rootfs/librknnrt_rk3588.so /usr/lib/
-COPY --from=rk-downloads /rootfs/yolov8n-320x320-rk3562.rknn /models/
-COPY --from=rk-downloads /rootfs/yolov8n-320x320-rk3566.rknn /models/
-COPY --from=rk-downloads /rootfs/yolov8n-320x320-rk3568.rknn /models/
-COPY --from=rk-downloads /rootfs/yolov8n-320x320-rk3588.rknn /models/
+ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/
+ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/
+ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn /models/rknn/
+ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn /models/rknn/
+ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn /models/rknn/
+ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn /models/rknn/
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-COPY --from=rk-downloads /rootfs/ffmpeg /usr/lib/btbn-ffmpeg/bin/
-COPY --from=rk-downloads /rootfs/ffprobe /usr/lib/btbn-ffmpeg/bin/
-RUN chmod +x /usr/lib/btbn-ffmpeg/bin/ffmpeg
-RUN chmod +x /usr/lib/btbn-ffmpeg/bin/ffprobe
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/latest/ffmpeg /usr/lib/btbn-ffmpeg/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/latest/ffprobe /usr/lib/btbn-ffmpeg/bin/

View File

@@ -304,7 +304,7 @@ These SoCs come with a NPU that will highly speed up detection.
 ### Setup
-RKNN support is provided using the `-rk` suffix for the docker image. Moreover, privileged mode must be enabled by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file.
+Use a Frigate docker image with the `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file.
 ### Configuration
@@ -376,3 +376,16 @@ $ cat /sys/kernel/debug/rknpu/load
 model:
   path: /config/model_cache/rknn/my-rknn-model.rknn
 ```
+
+:::tip
+
+When the SoC has a multi-core NPU, enabling all cores reduces inference time; this is especially worth doing with larger models such as yolov8l. If the NPU has three cores (as on rk3588/S SoCs), enable all of them with:
+
+```yaml
+detectors:
+  rknn:
+    type: rknn
+    core_mask: 0b111
+```
+
+:::
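
A note on the `core_mask` value added in this tip: it is a plain bitmask in which each bit enables one NPU core, so `0b111` (decimal 7, matching the `le=7` bound on the detector config field in this commit) selects cores 0, 1 and 2. A minimal sketch of that bit arithmetic, with constant names made up purely for illustration:

```python
# Hypothetical names; only the bit layout is taken from the documentation above.
NPU_CORE_0 = 0b001
NPU_CORE_1 = 0b010
NPU_CORE_2 = 0b100

all_cores = NPU_CORE_0 | NPU_CORE_1 | NPU_CORE_2
print(all_cores, all_cores == 0b111)  # 7 True

# Restricting inference to a single core would be core_mask: 0b001 (core 0)
# or core_mask: 0b010 (core 1), and so on.
```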

View File

@@ -103,7 +103,7 @@ Frigate supports SBCs with the following Rockchip SoCs:
 - RV1103/RV1106
 - RK3562
-Using the yolov8n model and an Orange Pi 5 Plus with RK3588 SoC inference speeds vary between 25-40 ms.
+Using the yolov8n model and an Orange Pi 5 Plus with RK3588 SoC, inference speeds vary between 20 and 25 ms.
 ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)

View File

@@ -24,7 +24,7 @@ DETECTOR_KEY = "rknn"
 supported_socs = ["rk3562", "rk3566", "rk3568", "rk3588"]
-yolov8_rknn_models = {
+yolov8_suffix = {
     "default-yolov8n": "n",
     "default-yolov8s": "s",
     "default-yolov8m": "m",
@@ -32,7 +32,6 @@ yolov8_rknn_models = {
     "default-yolov8x": "x",
 }
 class RknnDetectorConfig(BaseDetectorConfig):
     type: Literal[DETECTOR_KEY]
     core_mask: int = Field(default=0, ge=0, le=7, title="Core mask for NPU.")
@@ -45,9 +44,7 @@ class Rknn(DetectionApi):
         # find out SoC
         try:
             with open("/proc/device-tree/compatible") as file:
-                device_string = file.read()
-                device_string_parts = device_string.split(",")
-                soc = device_string_parts[-1]
+                soc = file.read().split(",")[-1].strip('\x00')
         except FileNotFoundError:
             logger.error("Make sure to run docker in privileged mode.")
             raise Exception("Make sure to run docker in privileged mode.")
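
For reference, a minimal standalone sketch of what the condensed lookup above does. The device tree's `compatible` node is a NUL-terminated list of vendor,model strings; the one-liner keeps the last comma-separated piece and strips the trailing NUL, leaving just the SoC name. The sample value below is only an assumed example of such a string:

```python
# Assumed example content of /proc/device-tree/compatible on an rk3588 board;
# real boards prepend their own vendor,model entries.
example = "xunlong,orangepi-5-plus\x00rockchip,rk3588\x00"

# split(",") -> ["xunlong", "orangepi-5-plus\x00rockchip", "rk3588\x00"]
soc = example.split(",")[-1].strip("\x00")
print(soc)  # rk3588
```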
@@ -64,16 +61,21 @@ class Rknn(DetectionApi):
                 )
             )
+        if "rk356" in soc:
+            os.rename("/usr/lib/librknnrt_rk356x.so", "/usr/lib/librknnrt.so")
+        elif "rk3588" in soc:
+            os.rename("/usr/lib/librknnrt_rk3588.so", "/usr/lib/librknnrt.so")
         self.model_path = config.model.path or "default-yolov8n"
         self.core_mask = config.core_mask
         self.height = config.model.height
         self.width = config.model.width
-        if self.model_path in yolov8_rknn_models:
+        if self.model_path in yolov8_suffix:
             if self.model_path == "default-yolov8n":
-                self.model_path = "/models/yolov8n-320x320-{soc}.rknn".format(soc=soc)
+                self.model_path = "/models/rknn/yolov8n-320x320-{soc}.rknn".format(soc=soc)
             else:
-                model_suffix = yolov8_rknn_models[self.model_path]
+                model_suffix = yolov8_suffix[self.model_path]
                 self.model_path = (
                     "/config/model_cache/rknn/yolov8{suffix}-320x320-{soc}.rknn".format(
                         suffix=model_suffix, soc=soc
@@ -166,10 +168,10 @@ class Rknn(DetectionApi):
         boxes = np.transpose(
             np.vstack(
                 (
-                    results[:, 1] - 0.5 * results[:, 3],
-                    results[:, 0] - 0.5 * results[:, 2],
-                    results[:, 3] + 0.5 * results[:, 3],
-                    results[:, 2] + 0.5 * results[:, 2],
+                    (results[:, 1] - 0.5 * results[:, 3]) / self.height,
+                    (results[:, 0] - 0.5 * results[:, 2]) / self.width,
+                    (results[:, 1] + 0.5 * results[:, 3]) / self.height,
+                    (results[:, 0] + 0.5 * results[:, 2]) / self.width,
                 )
             )
         )
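
To make the post-process fix concrete: the previous code built the second box corner from the size columns (`results[:, 3] + 0.5 * results[:, 3]`) and never normalized, while the corrected code derives both corners from the center columns and divides by the model input size, yielding boxes in a normalized `[y_min, x_min, y_max, x_max]` layout. The snippet below is a hedged, standalone illustration of that conversion; the column order `x_center, y_center, width, height` and the 320x320 input size are assumptions inferred from the surrounding code, not a verified spec:

```python
import numpy as np

# Assumed column layout: 0 = x_center, 1 = y_center, 2 = width, 3 = height (pixels).
model_width, model_height = 320, 320

# One fake detection: a 100x60 px box centered at (160, 120).
results = np.array([[160.0, 120.0, 100.0, 60.0]])

boxes = np.transpose(
    np.vstack(
        (
            (results[:, 1] - 0.5 * results[:, 3]) / model_height,  # y_min
            (results[:, 0] - 0.5 * results[:, 2]) / model_width,   # x_min
            (results[:, 1] + 0.5 * results[:, 3]) / model_height,  # y_max
            (results[:, 0] + 0.5 * results[:, 2]) / model_width,   # x_max
        )
    )
)
print(boxes)  # [[0.28125 0.34375 0.46875 0.65625]]
```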