Update tensorrt_models script to convert models from the frigate container

This commit is contained in:
Nate Meyer 2023-06-20 00:21:42 -04:00
parent 46787b25e2
commit d244a2532c
4 changed files with 19 additions and 16 deletions

View File

@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
libva-drm2 mesa-va-drivers libva-drm2 mesa-va-drivers
fi fi
apt-get purge gnupg apt-transport-https wget xz-utils -y apt-get purge gnupg apt-transport-https xz-utils -y
apt-get clean autoclean -y apt-get clean autoclean -y
apt-get autoremove --purge -y apt-get autoremove --purge -y
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*

View File

@ -1,3 +1,4 @@
/usr/local/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib /usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib /usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib /usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib

View File

@ -2,33 +2,31 @@
set -euxo pipefail set -euxo pipefail
CUDA_HOME=/usr/local/cuda OUTPUT_FOLDER=/trt_models
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
OUTPUT_FOLDER=/tensorrt_models
echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}" echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"
# Create output folder # Create output folder
mkdir -p ${OUTPUT_FOLDER} mkdir -p ${OUTPUT_FOLDER}
# Install packages
pip install --upgrade pip && pip install onnx==1.15.0 protobuf==3.20.3
# Clone tensorrt_demos repo # Clone tensorrt_demos repo
git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos # git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download /tmp/tensorrt_demos
cd /tmp/ && wget -qO tensorrt_demos.zip https://github.com/NateMeyer/tensorrt_demos/archive/refs/heads/conditional_download.zip
unzip tensorrt_demos.zip
# Build libyolo cp /usr/local/lib/libyolo_layer.so /tmp/tensorrt_demos-conditional_download/plugins/libyolo_layer.so
cd /tensorrt_demos/plugins && make all
cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so
# Download yolo weights # Download yolo weights
cd /tensorrt_demos/yolo && ./download_yolo.sh cd /tmp/tensorrt_demos-conditional_download/yolo && ./download_yolo.sh $YOLO_MODELS
# Build trt engine # Build trt engine
cd /tensorrt_demos/yolo cd /tmp/tensorrt_demos-conditional_download/yolo
for model in ${YOLO_MODELS//,/ } for model in ${YOLO_MODELS//,/ }
do do
python3 yolo_to_onnx.py -m ${model} python3 yolo_to_onnx.py -m ${model}
python3 onnx_to_tensorrt.py -m ${model} python3 onnx_to_tensorrt.py -m ${model}
cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt; cp /tmp/tensorrt_demos-conditional_download/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
done done
# Cleanup repo
rm -r /tmp/tensorrt_demos-conditional_download

View File

@ -1,9 +1,13 @@
# NVidia TensorRT Support (amd64 only) # NVidia TensorRT Support (amd64 only)
nvidia-pyindex; platform_machine == 'x86_64' --extra-index-url 'https://pypi.nvidia.com'
tensorrt == 8.6.1; platform_machine == 'x86_64' tensorrt == 8.6.1; platform_machine == 'x86_64'
tensorrt-libs == 8.6.1; platform_machine == 'x86_64'
tensorrt-bindings == 8.6.1; platform_machine == 'x86_64'
cuda-python == 12.1; platform_machine == 'x86_64' cuda-python == 12.1; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64' cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64' nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cublas-cu12 == 12.1.*; platform_machine == 'x86_64' nvidia-cublas-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cudnn-cu12 == 8.9.*; platform_machine == 'x86_64' nvidia-cudnn-cu12 == 8.9.*; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu12 == 12.1.*; platform_machine == 'x86_64' nvidia-cuda-nvrtc-cu12 == 12.1.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'