Mirror of https://github.com/blakeblackshear/frigate.git
Make TensorRT a separate build from the base Frigate image.
parent 4198e79807
commit 111fdfbdbc
Dockerfile (39 lines changed)
@@ -124,34 +124,6 @@ RUN /bin/mkdir -p '/usr/local/lib' && \
     /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
     ldconfig
 
-####
-#
-# TensorRT Support
-#
-# 1. Download and convert a model for the tensorRT runtime
-#
-####
-
-# Download and Convert TensorRT Model
-# FROM base_amd64 as tensorrt-converter
-
-# RUN apt-get update && apt-get install -y --no-install-recommends git sudo software-properties-common \
-#     cmake build-essential unzip python3.9-dev libnvinfer-dev python-is-python3 libnvparsers-dev libnvinfer-plugin-dev
-
-# RUN git clone https://github.com/jkjung-avt/tensorrt_demos.git /tensorrt_demos
-
-# ENV CUDA_HOME=/usr/local/cuda
-# ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
-# ENV PATH=$PATH:$CUDA_HOME/bin
-# RUN python3 -m pip install --upgrade pip
-# # ADD install_protobuf.sh /install_protobuf.sh
-# # RUN /install_protobuf.sh
-# RUN pip3 install cython protobuf onnx==1.4.1
-# RUN cd /tensorrt_demos/yolo && ./download_yolo.sh
-# ADD run.sh /run.sh
-
-
-
 FROM wget AS models
 
 # Get model and labels
@@ -212,6 +184,10 @@ RUN pip3 install -r requirements.txt
 COPY requirements-wheels.txt /requirements-wheels.txt
 RUN pip3 wheel --wheel-dir=/wheels -r requirements-wheels.txt
 
+# Add TensorRT wheels to another folder
+COPY requirements-tensorrt.txt /requirements-tensorrt.txt
+RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r requirements-tensorrt.txt
+
 
 # Collect deps in a single layer
 FROM scratch AS deps-rootfs
@@ -319,7 +295,12 @@ COPY migrations migrations/
 COPY --from=web-build /work/dist/ web/
 
 # Frigate final container
-FROM deps
+FROM deps AS frigate
 
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
+
+# Frigate w/ TensorRT Support as separate image
+FROM frigate AS frigate-tensorrt
+RUN --mount=type=bind,from=wheels,source=/trt-wheels,target=/deps/trt-wheels \
+    pip3 install -U /deps/trt-wheels/*.whl
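Net effect of the Dockerfile changes: the stale commented-out TensorRT converter stage is dropped, the TensorRT wheels are built into a separate /trt-wheels directory in the wheels stage, the default image becomes the named frigate stage, and the new frigate-tensorrt stage layers those wheels on top of it, so the base image carries no NVidia dependencies.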
Makefile (17 lines changed)
@@ -10,22 +10,27 @@ version:
 	echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
 
 local: version
-	docker buildx build --tag frigate:latest --load .
+	docker buildx build --target=frigate --tag frigate:latest --load .
 
+local-trt: version
+	docker buildx build --target=frigate-tensorrt --tag frigate:latest-tensorrt --load .
+
 amd64:
-	docker buildx build --platform linux/amd64 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
+	docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
+	docker buildx build --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH)-tensorrt .
 
 arm64:
-	docker buildx build --platform linux/arm64 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
+	docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
 
 armv7:
-	docker buildx build --platform linux/arm/v7 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
+	docker buildx build --platform linux/arm/v7 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
 
 build: version amd64 arm64 armv7
-	docker buildx build --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
+	docker buildx build --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
 
 push: build
-	docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) .
+	docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) .
+	docker buildx build --push --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt .
 
 run: local
 	docker run --rm --publish=5000:5000 --volume=${PWD}/config/config.yml:/config/config.yml frigate:latest
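With these Makefile targets the TensorRT variant is built alongside the standard image rather than replacing it: make local-trt produces frigate:latest-tensorrt for local use, and the amd64 and push targets additionally build an amd64-only image tagged with a -tensorrt suffix.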
@@ -2,8 +2,14 @@ import logging
 
 import ctypes
 import numpy as np
-import tensorrt as trt
-from cuda import cuda, cudart
+
+try:
+    import tensorrt as trt
+    from cuda import cuda, cudart
+
+    TRT_SUPPORT = True
+except ModuleNotFoundError as e:
+    TRT_SUPPORT = False
 
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig
@@ -14,27 +20,28 @@ logger = logging.getLogger(__name__)
 
 DETECTOR_KEY = "tensorrt"
 
+if TRT_SUPPORT:
 
-class TrtLogger(trt.ILogger):
-    def __init__(self):
-        trt.ILogger.__init__(self)
+    class TrtLogger(trt.ILogger):
+        def __init__(self):
+            trt.ILogger.__init__(self)
 
-    def log(self, severity, msg):
-        logger.log(self.getSeverity(severity), msg)
+        def log(self, severity, msg):
+            logger.log(self.getSeverity(severity), msg)
 
-    def getSeverity(self, sev: trt.ILogger.Severity) -> int:
-        if sev == trt.ILogger.VERBOSE:
-            return logging.DEBUG
-        elif sev == trt.ILogger.INFO:
-            return logging.INFO
-        elif sev == trt.ILogger.WARNING:
-            return logging.WARNING
-        elif sev == trt.ILogger.ERROR:
-            return logging.ERROR
-        elif sev == trt.ILogger.INTERNAL_ERROR:
-            return logging.CRITICAL
-        else:
-            return logging.DEBUG
+        def getSeverity(self, sev: trt.ILogger.Severity) -> int:
+            if sev == trt.ILogger.VERBOSE:
+                return logging.DEBUG
+            elif sev == trt.ILogger.INFO:
+                return logging.INFO
+            elif sev == trt.ILogger.WARNING:
+                return logging.WARNING
+            elif sev == trt.ILogger.ERROR:
+                return logging.ERROR
+            elif sev == trt.ILogger.INTERNAL_ERROR:
+                return logging.CRITICAL
+            else:
+                return logging.DEBUG
 
 
 class TensorRTDetectorConfig(BaseDetectorConfig):
@@ -174,6 +181,10 @@ class TensorRtDetector(DetectionApi):
     ]
 
     def __init__(self, detector_config: TensorRTDetectorConfig):
+        assert (
+            TRT_SUPPORT
+        ), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
+
         (cuda_err,) = cuda.cuInit(0)
         assert (
             cuda_err == cuda.CUresult.CUDA_SUCCESS
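The detector hunks above make the TensorRT Python bindings optional: the imports are wrapped in try/except, a module-level TRT_SUPPORT flag records whether they loaded, the TrtLogger helper is only defined when they did, and the constructor asserts on the flag so the plugin can be imported (but not instantiated) on the base image. A minimal, self-contained sketch of that same guarded-import pattern is shown below; it is illustrative only (the DemoTrtDetector class and model path are made up, not Frigate's actual code).

import logging

logger = logging.getLogger(__name__)

# Optional dependency: only installed in the frigate-tensorrt image.
try:
    import tensorrt as trt  # noqa: F401
    from cuda import cuda  # noqa: F401

    TRT_SUPPORT = True
except ModuleNotFoundError:
    TRT_SUPPORT = False

DETECTOR_KEY = "tensorrt"


class DemoTrtDetector:
    """Illustrative stand-in for a TensorRT-backed detector."""

    def __init__(self, model_path: str):
        # Fail only when this detector is actually configured, not at import time.
        assert (
            TRT_SUPPORT
        ), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
        self.model_path = model_path


if __name__ == "__main__":
    if TRT_SUPPORT:
        DemoTrtDetector("/models/example.trt")
    else:
        logger.warning("TensorRT wheels not installed; %s detector unavailable", DETECTOR_KEY)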
@@ -1,10 +1,11 @@
+# NVidia TensorRT Support (amd64 only)
 nvidia-pyindex; platform_machine == 'x86_64'
 nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
 cuda-python == 11.7; platform_machine == 'x86_64'
 cython == 0.29.*; platform_machine == 'x86_64'
-nvidia-cuda-runtime-cu11 == 11.7.*; platform_machine == 'x86_64'
-nvidia-cublas-cu11 == 11.10.*; platform_machine == 'x86_64'
-nvidia-cudnn-cu11 == 8.4.*; platform_machine == 'x86_64'
-polygraphy
-tensorflow
-easydict
+nvidia-cuda-runtime-cu11 == 2022.4.25; platform_machine == 'x86_64'
+nvidia-cuda-runtime-cu117 == 11.7.*; platform_machine == 'x86_64'
+nvidia-cublas-cu11 == 2022.4.8; platform_machine == 'x86_64'
+nvidia-cublas-cu117 == 11.10.*; platform_machine == 'x86_64'
+nvidia-cudnn-cu11 == 2022.5.19; platform_machine == 'x86_64'
+nvidia-cudnn-cu116 == 8.4.*; platform_machine == 'x86_64'
@@ -23,15 +23,4 @@ zeroconf == 0.39.4
 # Openvino Library - Custom built with MYRIAD support
 openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
 openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'
 openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_armv7l.whl; platform_machine == 'armv7l'
-# NVidia TensorRT Support (amd64 only)
-nvidia-pyindex; platform_machine == 'x86_64'
-nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
-cuda-python == 11.7; platform_machine == 'x86_64'
-cython == 0.29.*; platform_machine == 'x86_64'
-nvidia-cuda-runtime-cu11 == 2022.4.25; platform_machine == 'x86_64'
-nvidia-cuda-runtime-cu117 == 11.7.*; platform_machine == 'x86_64'
-nvidia-cublas-cu11 == 2022.4.8; platform_machine == 'x86_64'
-nvidia-cublas-cu117 == 11.10.*; platform_machine == 'x86_64'
-nvidia-cudnn-cu11 == 2022.5.19; platform_machine == 'x86_64'
-nvidia-cudnn-cu116 == 8.4.*; platform_machine == 'x86_64'