initial support for Rockchip boards

MarcA711 2023-10-29 21:41:27 +00:00
parent 16dc9f4bf7
commit a02466c2f2
10 changed files with 287 additions and 1 deletion

View File

@@ -79,6 +79,15 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
- name: Build and push rk build
uses: docker/bake-action@v3
with:
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-rk
*.cache-from=type=gha
jetson_jp4_build:
runs-on: ubuntu-latest
name: Jetson Jetpack 4

docker/rockchip/Dockerfile Normal file (97 lines)
View File

@@ -0,0 +1,97 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG SLIM_BASE=debian:11-slim
FROM ${SLIM_BASE} AS slim-base
# Add wheels for RKNN-Toolkit-Lite2
FROM wheels AS wheels-rk
COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels-rk.txt
# ToDo: download RKNN-Toolkit-Lite2 wheel using requirements-wheels-rk.txt
COPY docker/rockchip/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl /wheels/
# Collect deps in a single layer
FROM deps-rootfs AS deps-rootfs-rk
COPY docker/rockchip/yolov8n-320x320.rknn /models/
# ToDo: Download librknnrt.so using wget; depends on RKNN-Toolkit-Lite2 wheel
COPY docker/rockchip/librknnrt.so /usr/lib/
# Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc)
FROM slim-base AS deps
ARG TARGETARCH
ARG DEBIAN_FRONTEND
# http://stackoverflow.com/questions/48162574/ddg#49462622
ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
# https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(Native-GPU-Support)
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
ENV PATH="/usr/lib/btbn-ffmpeg/bin:/usr/local/go2rtc/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
/deps/install_deps.sh
RUN --mount=type=bind,from=wheels-rk,source=/wheels,target=/deps/wheels \
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
COPY --from=deps-rootfs-rk / /
RUN ldconfig
EXPOSE 5000
EXPOSE 1935
EXPOSE 8554
EXPOSE 8555/tcp 8555/udp
# Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB
ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T"
ENTRYPOINT ["/init"]
CMD []
HEALTHCHECK --start-period=120s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
# Frigate deps with Node.js and NPM for devcontainer
FROM deps AS devcontainer
# Do not start the actual Frigate service on devcontainer as it will be started by VSCode
# But start a fake service for simulating the logs
COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
RUN mkdir -p /opt/frigate \
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate
# Install Node 16
RUN apt-get update \
&& apt-get install wget -y \
&& wget -qO- https://deb.nodesource.com/setup_16.x | bash - \
&& apt-get install -y nodejs \
&& rm -rf /var/lib/apt/lists/* \
&& npm install -g npm@9
WORKDIR /workspace/frigate
RUN apt-get update \
&& apt-get install make -y \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
pip3 install -r requirements-dev.txt
CMD ["sleep", "infinity"]
# Frigate final container
FROM deps AS frigate
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rockchip/rknn.py /opt/frigate/frigate/detectors/plugins/

Binary file not shown.

docker/rockchip/requirements-wheels-rk.txt Normal file (3 lines)
View File

@@ -0,0 +1,3 @@
hide-warnings == 0.17
# ToDo: this would break every time RKNN-Toolkit-Lite2 gets an update, since the link would become invalid; so for now just copy the file from the Frigate GitHub repo
# rknn-toolkit-lite2 @ https://github.com/rockchip-linux/rknn-toolkit2/raw/master/rknn_toolkit_lite2/packages/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl

docker/rockchip/rk.hcl Normal file (27 lines)
View File

@@ -0,0 +1,27 @@
target wheels {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "wheels"
}

target deps-rootfs {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "deps-rootfs"
}

target rootfs {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "rootfs"
}

target rk {
  dockerfile = "docker/rockchip/Dockerfile"
  contexts = {
    wheels = "target:wheels",
    deps-rootfs = "target:deps-rootfs",
    rootfs = "target:rootfs"
  }
  platforms = ["linux/arm64"]
}

docker/rockchip/rk.mk Normal file (10 lines)
View File

@@ -0,0 +1,10 @@
BOARDS += rk

local-rk: version
	docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk

build-rk: version
	docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk

push-rk: build-rk
	docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk

docker/rockchip/rknn.py Normal file (94 lines)
View File

@@ -0,0 +1,94 @@
import logging

import cv2
import cv2.dnn
import numpy as np
from hide_warnings import hide_warnings
from pydantic import Field
from typing import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig

logger = logging.getLogger(__name__)

DETECTOR_KEY = "rknn"


class RknnDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]
    score_thresh: float = Field(default=0.5, ge=0, le=1, title="Minimum confidence for detection.")
    nms_thresh: float = Field(default=0.45, ge=0, le=1, title="IoU threshold for non-maximum suppression.")


class Rknn(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, config: RknnDetectorConfig):
        self.height = config.model.height
        self.width = config.model.width
        self.score_thresh = config.score_thresh
        self.nms_thresh = config.nms_thresh
        self.model_path = config.model.path or "/models/yolov8n-320x320.rknn"

        # deferred import: the rknnlite wheel is only installed in the Rockchip image
        from rknnlite.api import RKNNLite

        self.rknn = RKNNLite(verbose=False)
        if self.rknn.load_rknn(self.model_path) != 0:
            logger.error("Error initializing rknn model.")
        if self.rknn.init_runtime() != 0:
            logger.error("Error initializing rknn runtime.")

    def __del__(self):
        self.rknn.release()

    def postprocess(self, results):
        """
        Processes yolov8 output.

        Args:
            results: array with shape (1, 84, n, 1) where n depends on the yolov8 model size (n=2100 for a 320x320 model)

        Returns:
            detections: array with shape (20, 6) with 20 rows of (class, confidence, y_min, x_min, y_max, x_max)
        """
        results = np.transpose(results[0, :, :, 0])  # array shape (2100, 84)
        classes = np.argmax(results[:, 4:], axis=1)  # array shape (2100,); index of the highest-confidence class of each row
        scores = np.max(results[:, 4:], axis=1)  # array shape (2100,); max confidence of each row

        # array shape (2100, 4); bounding box of each row as (x_min, y_min, width, height)
        boxes = np.transpose(
            np.vstack((
                results[:, 0] - 0.5 * results[:, 2],
                results[:, 1] - 0.5 * results[:, 3],
                results[:, 2],
                results[:, 3],
            ))
        )

        # indices of rows with confidence > score_thresh, filtered with non-maximum suppression (NMS)
        result_boxes = cv2.dnn.NMSBoxes(boxes, scores, self.score_thresh, self.nms_thresh, 0.5)

        detections = np.zeros((20, 6), np.float32)
        for i in range(len(result_boxes)):
            if i >= 20:
                break
            index = result_boxes[i]
            detections[i] = [
                classes[index],
                scores[index],
                (boxes[index][1]) / self.height,
                (boxes[index][0]) / self.width,
                (boxes[index][1] + boxes[index][3]) / self.height,
                (boxes[index][0] + boxes[index][2]) / self.width,
            ]
        return detections

    @hide_warnings
    def inference(self, tensor_input):
        return self.rknn.inference(inputs=tensor_input)

    def detect_raw(self, tensor_input):
        output = self.inference([tensor_input])
        return self.postprocess(output[0])
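
For reference, here is a small standalone sketch (not part of this commit) of the box decoding that `postprocess` performs on the raw yolov8 output. The random tensor and the n=2100 anchor count for a 320x320 model are assumptions used purely for illustration:

```python
import numpy as np

# Hypothetical stand-in for the first element of the RKNNLite inference output,
# shaped (1, 84, n, 1): 4 box values plus 80 class scores per anchor.
n = 2100  # anchor count assumed for a 320x320 yolov8 model
raw = np.random.rand(1, 84, n, 1).astype(np.float32)

results = np.transpose(raw[0, :, :, 0])      # (n, 84)
classes = np.argmax(results[:, 4:], axis=1)  # best class index per anchor
scores = np.max(results[:, 4:], axis=1)      # confidence of that class

# yolov8 boxes are (center_x, center_y, width, height); cv2.dnn.NMSBoxes expects
# (x_min, y_min, width, height), hence the half-width/half-height shift.
boxes = np.column_stack((
    results[:, 0] - 0.5 * results[:, 2],
    results[:, 1] - 0.5 * results[:, 3],
    results[:, 2],
    results[:, 3],
))

print(classes.shape, scores.shape, boxes.shape)  # (2100,) (2100,) (2100, 4)
```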

Binary file not shown.

View File

@@ -5,7 +5,7 @@ title: Object Detectors
# Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt` and `rknn`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## CPU Detector (not recommended)
@@ -291,3 +291,49 @@ To verify that the integration is working correctly, start Frigate and observe t
# Community Supported Detectors
## Rockchip RKNN-Toolkit-Lite2
This detector is only available if one of the following Rockchip SoCs is used:
- RK3566/RK3568
- RK3588/RK3588S
- RV1103/RV1106
- RK3562
These SoCs come with an NPU that significantly speeds up detection.
### Setup
All you need to do is use the `frigate:latest-rk` Docker image. Here is a minimal `docker-compose.yml`:
```yaml
version: "3.9"
services:
  frigate:
    container_name: frigate
    image: frigate:latest-rk
    privileged: true
    restart: unless-stopped
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./data/config:/config
      - ./data/media:/media/frigate
    ports:
      - "5000:5000"
```
### Configuration
This `config.yml` shows all relevant options for configuring the detector and explains them. All values shown are the defaults (the one exception is `input_pixel_format`). Lines that are required to use the detector are labeled as required; all other lines are optional.
```yaml
detectors: # required
rknn: # required
type: rknn # required
model: # required
# path to .rknn model file
path: /models/yolov8n-320x320.rknn
# width and height of detection frames
width: 320
height: 320
# pixel format of detection frame
  # default value is rgb, but yolo models usually use the bgr format
input_pixel_format: bgr # required
# shape of detection frame
input_tensor: nhwc
```