From bdabf176160733619f8e471aa43924a5e72e855a Mon Sep 17 00:00:00 2001
From: Nate Meyer
Date: Sat, 24 Jun 2023 11:25:01 -0400
Subject: [PATCH] Add S6 scripts to test and convert specified TensorRT models
 at startup. Rearrange tensorrt files into a docker support folder.

---
 Dockerfile                                    |  9 +++-
 .../etc/ld.so.conf.d/cuda_tensorrt.conf       |  0
 .../frigate/dependencies.d/trt-model-prepare  |  0
 .../trt-model-prepare/dependencies.d/base     |  0
 .../rootfs/etc/s6-rc.d/trt-model-prepare/run  | 45 +++++++++++++++++++
 .../rootfs/etc/s6-rc.d/trt-model-prepare/type |  1 +
 .../rootfs/etc/s6-rc.d/trt-model-prepare/up   |  1 +
 .../tensorrt_detector/tensorrt_libyolo.sh     | 16 +++++++
 docker/tensorrt_libyolo.sh                    | 10 -----
 docker/tensorrt_models.sh                     | 32 -------------
 10 files changed, 70 insertions(+), 44 deletions(-)
 rename docker/{ => support/tensorrt_detector}/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf (100%)
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/frigate/dependencies.d/trt-model-prepare
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/dependencies.d/base
 create mode 100755 docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/run
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/type
 create mode 100644 docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/up
 create mode 100755 docker/support/tensorrt_detector/tensorrt_libyolo.sh
 delete mode 100755 docker/tensorrt_libyolo.sh
 delete mode 100755 docker/tensorrt_models.sh

diff --git a/Dockerfile b/Dockerfile
index cc38358f7..6109563da 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -265,16 +265,21 @@ COPY --from=rootfs / /
 
 # Build TensorRT-specific library
 FROM nvcr.io/nvidia/tensorrt:23.05-py3 AS trt-deps
 
-RUN --mount=type=bind,source=docker/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
+RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
     /tensorrt_libyolo.sh
 
 # Frigate w/ TensorRT Support as separate image
 FROM frigate AS frigate-tensorrt
+ENV YOLO_MODELS="yolov7-tiny-416"
+
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
+
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
-    ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
+    ln -s libnvrtc.so.12.1 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
     ldconfig
 
 # Dev Container w/ TRT
diff --git a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
similarity index 100%
rename from docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
rename to docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/frigate/dependencies.d/trt-model-prepare b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/frigate/dependencies.d/trt-model-prepare
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/dependencies.d/base b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/dependencies.d/base
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/run b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/run
new file mode 100755
index 000000000..0656b126b
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/run
@@ -0,0 +1,45 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Generate models for the TensorRT detector
+
+set -o errexit -o nounset -o pipefail
+
+OUTPUT_FOLDER=/media/frigate/model_cache/tensorrt
+
+# Create output folder
+mkdir -p ${OUTPUT_FOLDER}
+
+FIRST_MODEL=true
+MODEL_CONVERT=""
+
+for model in ${YOLO_MODELS//,/ }
+do
+    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
+        if [[ ${FIRST_MODEL} == true ]]; then
+            MODEL_CONVERT="${model}"
+            FIRST_MODEL=false
+        else
+            MODEL_CONVERT+=",${model}"
+        fi
+    fi
+done
+
+if [[ ${MODEL_CONVERT} == "" ]]; then
+    echo "No models to convert."
+    exit 0;
+fi
+
+echo "Generating the following TRT Models: ${MODEL_CONVERT}"
+
+# Build trt engine
+cd /usr/local/src/tensorrt_demos/yolo
+
+# Download yolo weights
+./download_yolo.sh $MODEL_CONVERT
+
+for model in ${MODEL_CONVERT//,/ }
+do
+    python3 yolo_to_onnx.py -m ${model}
+    python3 onnx_to_tensorrt.py -m ${model}
+    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
+done
\ No newline at end of file
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/type b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/type
new file mode 100644
index 000000000..bdd22a185
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/type
@@ -0,0 +1 @@
+oneshot
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/up b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/up
new file mode 100644
index 000000000..b9de40ad0
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-rc.d/trt-model-prepare/up
@@ -0,0 +1 @@
+/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
diff --git a/docker/support/tensorrt_detector/tensorrt_libyolo.sh b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
new file mode 100755
index 000000000..44f733bb2
--- /dev/null
+++ b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+# Clone tensorrt_demos repo
+git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
+
+# Build libyolo
+cd ./tensorrt_demos/plugins && make all
+cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
+cp libyolo_layer.so ../yolo/libyolo_layer.so
+
+# Store yolo scripts for later conversion
+cd ../
+mkdir -p /usr/local/src/tensorrt_demos
+cp -a yolo /usr/local/src/tensorrt_demos/
\ No newline at end of file
diff --git a/docker/tensorrt_libyolo.sh b/docker/tensorrt_libyolo.sh
deleted file mode 100755
index dc7a39ca1..000000000
--- a/docker/tensorrt_libyolo.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-# Clone tensorrt_demos repo
-git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git
-
-# Build libyolo
-cd ./tensorrt_demos/plugins && make all
-cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
\ No newline at end of file
diff --git a/docker/tensorrt_models.sh b/docker/tensorrt_models.sh
deleted file mode 100755
index 27e98a078..000000000
--- a/docker/tensorrt_models.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-OUTPUT_FOLDER=/trt-models
-echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"
-
-# Create output folder
-mkdir -p ${OUTPUT_FOLDER}
-
-# Clone tensorrt_demos repo
-# git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download /tmp/tensorrt_demos
-cd /tmp/ && wget -qO tensorrt_demos.zip https://github.com/NateMeyer/tensorrt_demos/archive/refs/heads/conditional_download.zip
-unzip tensorrt_demos.zip
-
-cp /usr/local/lib/libyolo_layer.so /tmp/tensorrt_demos-conditional_download/plugins/libyolo_layer.so
-
-# Download yolo weights
-cd /tmp/tensorrt_demos-conditional_download/yolo && ./download_yolo.sh $YOLO_MODELS
-
-# Build trt engine
-cd /tmp/tensorrt_demos-conditional_download/yolo
-
-for model in ${YOLO_MODELS//,/ }
-do
-    python3 yolo_to_onnx.py -m ${model}
-    python3 onnx_to_tensorrt.py -m ${model}
-    cp /tmp/tensorrt_demos-conditional_download/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
-done
-
-# Cleanup repo
-rm -r /tmp/tensorrt_demos-conditional_download
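
A minimal usage sketch, not part of the patch: with these changes applied, the engines to build at container startup are selected by the YOLO_MODELS environment variable, and finished engines are cached under /media/frigate/model_cache/tensorrt so they are only converted once. The frigate-tensorrt build target comes from the Dockerfile above; the local image tag, GPU flag, and host path below are illustrative assumptions.

    # Build the TensorRT variant defined by the frigate-tensorrt stage (assumed local tag)
    docker build --target frigate-tensorrt -t frigate-tensorrt .

    # Request two engines; the trt-model-prepare oneshot converts any that are
    # missing from the cache before Frigate starts (GPU access is assumed)
    docker run -d --gpus all \
      -e YOLO_MODELS="yolov7-tiny-416,yolov4-tiny-416" \
      -v /path/to/media:/media/frigate \
      frigate-tensorrt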