Make TensorRT a separate build from the base Frigate image.

This commit is contained in:
Nate Meyer 2022-12-28 09:39:14 -05:00
parent 4198e79807
commit 111fdfbdbc
5 changed files with 60 additions and 73 deletions

View File

@ -124,34 +124,6 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
/usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
ldconfig
####
#
# TensorRT Support
#
# 1. Download and convert a model for the tensorRT runtime
#
####
# Download and Convert TensorRT Model
# FROM base_amd64 as tensorrt-converter
# RUN apt-get update && apt-get install -y --no-install-recommends git sudo software-properties-common \
# cmake build-essential unzip python3.9-dev libnvinfer-dev python-is-python3 libnvparsers-dev libnvinfer-plugin-dev
# RUN git clone https://github.com/jkjung-avt/tensorrt_demos.git /tensorrt_demos
# ENV CUDA_HOME=/usr/local/cuda
# ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
# ENV PATH=$PATH:$CUDA_HOME/bin
# RUN python3 -m pip install --upgrade pip
# # ADD install_protobuf.sh /install_protobuf.sh
# # RUN /install_protobuf.sh
# RUN pip3 install cython protobuf onnx==1.4.1
# RUN cd /tensorrt_demos/yolo && ./download_yolo.sh
# ADD run.sh /run.sh
FROM wget AS models
# Get model and labels
@ -212,6 +184,10 @@ RUN pip3 install -r requirements.txt
COPY requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/wheels -r requirements-wheels.txt
# Add TensorRT wheels to another folder
COPY requirements-tensorrt.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r requirements-tensorrt.txt
# Collect deps in a single layer
FROM scratch AS deps-rootfs
@ -319,7 +295,12 @@ COPY migrations migrations/
COPY --from=web-build /work/dist/ web/
# Frigate final container
FROM deps FROM deps AS frigate
WORKDIR /opt/frigate/
COPY --from=rootfs / /
# Frigate w/ TensorRT Support as separate image
FROM frigate AS frigate-tensorrt
RUN --mount=type=bind,from=wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl

View File

@ -10,22 +10,27 @@ version:
echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
local: version
docker buildx build --tag frigate:latest --load . docker buildx build --target=frigate --tag frigate:latest --load .
local-trt: version
docker buildx build --target=frigate-tensorrt --tag frigate:latest-tensorrt --load .
amd64:
docker buildx build --platform linux/amd64 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
docker buildx build --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH)-tensorrt .
arm64:
docker buildx build --platform linux/arm64 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
armv7:
docker buildx build --platform linux/arm/v7 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . docker buildx build --platform linux/arm/v7 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
build: version amd64 arm64 armv7
docker buildx build --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . docker buildx build --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
push: build
docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) . docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) .
docker buildx build --push --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt .
run: local
docker run --rm --publish=5000:5000 --volume=${PWD}/config/config.yml:/config/config.yml frigate:latest

View File

@ -2,8 +2,14 @@ import logging
import ctypes
import numpy as np
import tensorrt as trt
from cuda import cuda, cudart try:
import tensorrt as trt
from cuda import cuda, cudart
TRT_SUPPORT = True
except ModuleNotFoundError as e:
TRT_SUPPORT = False
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
@ -14,8 +20,9 @@ logger = logging.getLogger(__name__)
DETECTOR_KEY = "tensorrt"
if TRT_SUPPORT:
class TrtLogger(trt.ILogger):
    def __init__(self):
        trt.ILogger.__init__(self)
@ -174,6 +181,10 @@ class TensorRtDetector(DetectionApi):
]
def __init__(self, detector_config: TensorRTDetectorConfig):
assert (
TRT_SUPPORT
), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
(cuda_err,) = cuda.cuInit(0)
assert (
cuda_err == cuda.CUresult.CUDA_SUCCESS

View File

@ -1,10 +1,11 @@
# NVidia TensorRT Support (amd64 only)
nvidia-pyindex; platform_machine == 'x86_64'
nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
cuda-python == 11.7; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.7.*; platform_machine == 'x86_64' nvidia-cuda-runtime-cu11 == 2022.4.25; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.10.*; platform_machine == 'x86_64' nvidia-cuda-runtime-cu117 == 11.7.*; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.4.*; platform_machine == 'x86_64' nvidia-cublas-cu11 == 2022.4.8; platform_machine == 'x86_64'
polygraphy nvidia-cublas-cu117 == 11.10.*; platform_machine == 'x86_64'
tensorflow nvidia-cudnn-cu11 == 2022.5.19; platform_machine == 'x86_64'
easydict nvidia-cudnn-cu116 == 8.4.*; platform_machine == 'x86_64'

View File

@ -24,14 +24,3 @@ zeroconf == 0.39.4
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_armv7l.whl; platform_machine == 'armv7l'
# NVidia TensorRT Support (amd64 only)
nvidia-pyindex; platform_machine == 'x86_64'
nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
cuda-python == 11.7; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 2022.4.25; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu117 == 11.7.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 2022.4.8; platform_machine == 'x86_64'
nvidia-cublas-cu117 == 11.10.*; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 2022.5.19; platform_machine == 'x86_64'
nvidia-cudnn-cu116 == 8.4.*; platform_machine == 'x86_64'