add converter to generate ssd_mobilenet_v2_coco

YS 2021-12-21 22:31:58 +03:00
parent b0c314a1ee
commit 3ba91f09f7
5 changed files with 97 additions and 0 deletions

Dockerfile.l4t.tf15
@@ -0,0 +1,10 @@
# TensorFlow 1.15 image for JetPack/L4T r32.6.1
FROM nvcr.io/nvidia/l4t-tensorflow:r32.6.1-tf1.15-py3
RUN apt-get update && apt-get install -y git sudo
# jkjung-avt/tensorrt_demos provides the SSD -> UFF -> TensorRT build scripts
RUN git clone https://github.com/jkjung-avt/tensorrt_demos.git /tensorrt_demos
ADD 0001-fix-trt.patch /tensorrt_demos/0001-fix-trt.patch
RUN cd /tensorrt_demos && \
    git apply 0001-fix-trt.patch
ADD run.sh /run.sh

@@ -0,0 +1,14 @@
build.sh converts the pre-trained TensorFlow Single-Shot Multibox Detector (SSD) model ssd_mobilenet_v2_coco through UFF into a TensorRT engine for real-time object detection.
The output is copied to the ./model folder.
Note:
The conversion consumes a significant amount of memory; consider extending swap on a Jetson Nano.
Usage:
cd ./frigate/converters/ssd_mobilenet_v2_coco/
./build.sh
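
After a successful build, the engine can be sanity-checked on the Jetson. A minimal sketch, assuming the TensorRT Python bindings that ship with the L4T image:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
with open('./model/TRT_ssd_mobilenet_v2_coco.bin', 'rb') as f, \
        trt.Runtime(TRT_LOGGER) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())
    # expect the 3x300x300 SSD input binding plus the detection output bindings
    for i in range(engine.num_bindings):
        print(engine.get_binding_name(i), engine.get_binding_shape(i))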

assets/0001-fix-trt.patch
@@ -0,0 +1,52 @@
From 40953eaae8ca55838e046325b257faaff0bbe33f Mon Sep 17 00:00:00 2001
From: YS <ys@gm.com>
Date: Tue, 21 Dec 2021 21:01:35 +0300
Subject: [PATCH] fix trt
---
ssd/build_engine.py | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/ssd/build_engine.py b/ssd/build_engine.py
index 65729a9..e4a55c8 100644
--- a/ssd/build_engine.py
+++ b/ssd/build_engine.py
@@ -17,7 +17,6 @@ import uff
import tensorrt as trt
import graphsurgeon as gs
-
DIR_NAME = os.path.dirname(__file__)
LIB_FILE = os.path.abspath(os.path.join(DIR_NAME, 'libflattenconcat.so'))
MODEL_SPECS = {
@@ -286,19 +285,23 @@ def main():
text=True,
debug_mode=DEBUG_UFF)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
- builder.max_workspace_size = 1 << 28
+ config = builder.create_builder_config()
+ config.max_workspace_size = 1 << 28
builder.max_batch_size = 1
- builder.fp16_mode = True
+ config.set_flag(trt.BuilderFlag.FP16)
parser.register_input('Input', INPUT_DIMS)
parser.register_output('MarkOutput_0')
parser.parse(spec['tmp_uff'], network)
- engine = builder.build_cuda_engine(network)
+
+ plan = builder.build_serialized_network(network, config)
+
+ with trt.Runtime(TRT_LOGGER) as runtime:
+ engine = runtime.deserialize_cuda_engine(plan)
buf = engine.serialize()
with open(spec['output_bin'], 'wb') as f:
f.write(buf)
-
if __name__ == '__main__':
main()
--
2.17.1
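
For context: TensorRT 8 (shipped with JetPack r32.6.1) removed max_workspace_size and fp16_mode from the Builder and dropped build_cuda_engine(), which is why the patch moves those settings onto an IBuilderConfig and builds a serialized plan instead. A minimal sketch of the equivalent flow; the plan returned by build_serialized_network() supports the buffer protocol, so the deserialize/re-serialize round trip in the patch could also be skipped:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network()      # implicit-batch network, as the UFF parser requires
config = builder.create_builder_config()
config.max_workspace_size = 1 << 28     # 256 MiB of build scratch space
config.set_flag(trt.BuilderFlag.FP16)   # half precision on the Jetson GPU
# ... populate `network` with trt.UffParser() as build_engine.py does ...
plan = builder.build_serialized_network(network, config)
with open('TRT_ssd_mobilenet_v2_coco.bin', 'wb') as f:
    f.write(plan)                       # IHostMemory exposes the buffer protocol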

assets/run.sh
@@ -0,0 +1,6 @@
#!/bin/bash
set -xe
cd /tensorrt_demos/ssd
./install.sh
python3 build_engine.py ssd_mobilenet_v2_coco
cp /tensorrt_demos/ssd/TRT_ssd_mobilenet_v2_coco.bin /model/TRT_ssd_mobilenet_v2_coco.bin

build.sh
@@ -0,0 +1,15 @@
#!/bin/bash
mkdir -p "$(pwd)/model"
docker build --tag models.ssd_v2_coco --file ./Dockerfile.l4t.tf15 ./assets/
# 1 GB tmpfs for temporary build files; ./model receives the generated engine
sudo docker run --rm -it --name models.ssd_v2_coco \
    --mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \
    -v "$(pwd)/model":/model:rw \
    -v /tmp/argus_socket:/tmp/argus_socket \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility,video \
    --runtime=nvidia \
    --privileged \
    models.ssd_v2_coco /run.sh
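
The generated plan file is specific to the TensorRT version and GPU it was built on, so TRT_ssd_mobilenet_v2_coco.bin has to be regenerated after a JetPack/TensorRT upgrade or when moving to a different Jetson model.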