Prepare for multi-arch tensorrt build
commit 8829dc26e2 (parent ffaaa77115)
@@ -9,28 +9,11 @@ ARG DEBIAN_FRONTEND
ARG TARGETARCH

# Add TensorRT wheels to another folder
COPY docker/tensorrt/requirements.txt /requirements-tensorrt.txt
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt

# Build TensorRT-specific library
FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps

RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
    /tensorrt_libyolo.sh

# Frigate w/ TensorRT Support as separate image
FROM deps AS frigate-tensorrt

#Disable S6 Global timeout
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
ENV YOLO_MODELS="yolov7-tiny-416"

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/tensorrt/detector/rootfs/ /

RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    pip3 install -U /deps/trt-wheels/*.whl && \
    ldconfig
docker/tensorrt/Dockerfile.base (Normal file, 26 lines)
@@ -0,0 +1,26 @@
# syntax=docker/dockerfile:1.4

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3

# Build TensorRT-specific library
FROM ${TRT_BASE} AS trt-deps

RUN apt-get update \
    && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \
    && rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
    /tensorrt_libyolo.sh

# Frigate w/ TensorRT Support as separate image
FROM deps AS tensorrt-base

#Disable S6 Global timeout
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS="yolov7-tiny-416"
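The base image above expects a `deps` stage that is normally supplied through a named build context (see the bake file changes further down). As a rough sketch, not part of the commit, it could also be built by hand; the frigate-deps:dev tag below is a hypothetical stand-in for a locally built deps stage:

    docker buildx build \
        --file docker/tensorrt/Dockerfile.base \
        --build-context deps=docker-image://frigate-deps:dev \
        --target tensorrt-base \
        --tag frigate-tensorrt-base:dev \
        .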
@@ -17,7 +17,7 @@ for model in ${YOLO_MODELS//,/ }
do
    # Remove old link in case path/version changed
    rm -f ${MODEL_CACHE_DIR}/${model}.trt

    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
        if [[ ${FIRST_MODEL} = true ]]; then
            MODEL_CONVERT="${model}"
@@ -40,14 +40,21 @@ echo "Generating the following TRT Models: ${MODEL_CONVERT}"
# Build trt engine
cd /usr/local/src/tensorrt_demos/yolo

# Download yolo weights
./download_yolo.sh $MODEL_CONVERT > /dev/null
echo "Downloading yolo weights"
./download_yolo.sh $MODEL_DOWNLOAD 2> /dev/null

for model in ${MODEL_CONVERT//,/ }
do
    echo "Converting ${model} model"
    python3 yolo_to_onnx.py -m ${model} > /dev/null
    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt

    echo -e "\nGenerating ${model}.trt. This may take a few minutes.\n"; start=$(date +%s)
    cmd="python3 onnx_to_tensorrt.py -m ${model}"
    $cmd > /tmp/onnx_to_tensorrt.log || { cat /tmp/onnx_to_tensorrt.log && continue; }

    mv ${model%-dla}.trt ${OUTPUT_FOLDER}/${model}.trt;
    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
    echo "Generated ${model}.trt in $(($(date +%s)-start)) seconds"
done

echo "Available tensorrt models:"
cd ${OUTPUT_FOLDER} && ls *.trt;
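Since the conversion loop above splits YOLO_MODELS on commas, extra engines can be requested when the container starts. A hedged usage sketch (image tag taken from the Makefile below, model list illustrative, config/media mounts omitted):

    docker run --rm --gpus all \
        -e YOLO_MODELS="yolov7-tiny-416,yolov4-tiny-416" \
        frigate:latest-tensorrt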
@ -8,7 +8,10 @@ SCRIPT_DIR="/usr/local/src/tensorrt_demos"
|
||||
git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
|
||||
|
||||
# Build libyolo
|
||||
cd ./tensorrt_demos/plugins && make all
|
||||
if [ ! -e /usr/local/cuda ]; then
|
||||
ln -s /usr/local/cuda-* /usr/local/cuda
|
||||
fi
|
||||
cd ./tensorrt_demos/plugins && make all -j$(nproc)
|
||||
cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
|
||||
|
||||
# Store yolo scripts for later conversion
|
||||
|
||||
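A possible sanity check after this script runs inside the trt-deps stage, not something the commit adds, to confirm the plugin built and links cleanly:

    ldd /usr/local/lib/libyolo_layer.so
    nm -D /usr/local/lib/libyolo_layer.so | grep -i yolo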
@@ -1,19 +1,27 @@
variable "ARCH" {
    default = "amd64"
}

target "_build_args" {
    platforms = ["linux/${ARCH}"]
}

target deps {
    dockerfile = "docker/main/Dockerfile"
    platforms = ["linux/amd64"]
    target = "deps"
    inherits = ["_build_args"]
}

target rootfs {
    dockerfile = "docker/main/Dockerfile"
    platforms = ["linux/amd64"]
    target = "rootfs"
    inherits = ["_build_args"]
}

target wheels {
    dockerfile = "docker/main/Dockerfile"
    platforms = ["linux/amd64"]
    target = "wheels"
    inherits = ["_build_args"]
}

target devcontainer {
@@ -22,16 +30,25 @@ target devcontainer {
    target = "devcontainer"
}

target tensorrt {
    dockerfile = "docker/tensorrt/Dockerfile"
target "tensorrt-base" {
    dockerfile = "docker/tensorrt/Dockerfile.base"
    context = "."
    contexts = {
        deps = "target:deps",
    }
    inherits = ["_build_args"]
}

target "tensorrt" {
    dockerfile = "docker/tensorrt/Dockerfile.${ARCH}"
    context = "."
    contexts = {
        tensorrt-base = "target:tensorrt-base",
        rootfs = "target:rootfs"
        wheels = "target:wheels"
    }
    platforms = ["linux/amd64"]
    target = "frigate-tensorrt"
    inherits = ["_build_args"]
}

target devcontainer-trt {
@@ -45,4 +62,4 @@ target devcontainer-trt {
    }
    platforms = ["linux/amd64"]
    target = "devcontainer-trt"
}
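With this change the ARCH variable selects which arch-specific Dockerfile the tensorrt target builds (docker/tensorrt/Dockerfile.${ARCH}). As a sketch, a direct invocation equivalent to the Makefile's local build, amd64 being the only arch this commit wires up:

    ARCH=amd64 docker buildx bake --load \
        --file=docker/tensorrt/trt.hcl \
        --set tensorrt.tags=frigate:latest-tensorrt \
        tensorrt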
@@ -1,10 +1,12 @@
BOARDS += trt

X86_DGPU_ARGS := ARCH=amd64

local-trt: version
	docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt
	$(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt

build-trt:
	docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
	$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt

push-trt: build-trt
	docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
	$(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
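For a quick end-to-end check of the local path, a short sketch using the target above (the second command just confirms the loaded tag):

    make local-trt
    docker image ls frigate:latest-tensorrt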