diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 7769737a7..0c460cfad 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -42,7 +42,6 @@ "extensions": [ "ms-python.python", "ms-python.vscode-pylance", - "ms-python.black-formatter", "visualstudioexptteam.vscodeintellicode", "mhutchie.git-graph", "ms-azuretools.vscode-docker", @@ -53,13 +52,10 @@ "csstools.postcss", "blanu.vscode-styled-jsx", "bradlc.vscode-tailwindcss", - "ms-python.isort", "charliermarsh.ruff" ], "settings": { "remote.autoForwardPorts": false, - "python.linting.pylintEnabled": true, - "python.linting.enabled": true, "python.formatting.provider": "none", "python.languageServer": "Pylance", "editor.formatOnPaste": false, @@ -72,7 +68,7 @@ "eslint.workingDirectories": ["./web"], "isort.args": ["--settings-path=./pyproject.toml"], "[python]": { - "editor.defaultFormatter": "ms-python.black-formatter", + "editor.defaultFormatter": "charliermarsh.ruff", "editor.formatOnSave": true, "editor.codeActionsOnSave": { "source.fixAll": true, diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml new file mode 100644 index 000000000..c96102edb --- /dev/null +++ b/.github/actions/setup/action.yml @@ -0,0 +1,39 @@ +name: 'Setup' +description: 'Set up QEMU and Buildx' +inputs: + GITHUB_TOKEN: + required: true +outputs: + image-name: + value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ steps.create-short-sha.outputs.SHORT_SHA }} + cache-name: + value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:cache +runs: + using: "composite" + steps: + - name: Remove unnecessary files + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf /usr/local/lib/android + sudo rm -rf /opt/ghc + shell: bash + - id: lowercaseRepo + uses: ASzc/change-string-case-action@v5 + with: + string: ${{ github.repository }} + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + 
uses: docker/setup-buildx-action@v2 + - name: Log in to the Container registry + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ inputs.GITHUB_TOKEN }} + - name: Create version file + run: make version + shell: bash + - id: create-short-sha + run: echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT + shell: bash diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d424c6bf8..79e8b2881 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -13,7 +13,13 @@ updates: open-pull-requests-limit: 10 target-branch: dev - package-ecosystem: "pip" - directory: "/" + directory: "/docker/main" + schedule: + interval: daily + open-pull-requests-limit: 10 + target-branch: dev + - package-ecosystem: "pip" + directory: "/docker/tensorrt" schedule: interval: daily open-pull-requests-limit: 10 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 93b084a8d..c6fad8817 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,7 @@ name: CI on: + workflow_dispatch: push: branches: - dev @@ -15,53 +16,154 @@ env: PYTHON_VERSION: 3.9 jobs: - multi_arch_build: + amd64_build: runs-on: ubuntu-latest - name: Image Build + name: AMD64 Build + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push amd64 standard build + uses: docker/build-push-action@v5 + with: + context: . 
+ file: docker/main/Dockerfile + push: true + platforms: linux/amd64 + target: frigate + tags: ${{ steps.setup.outputs.image-name }}-amd64 + cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 + - name: Build and push TensorRT (x86 GPU) + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max + arm64_build: + runs-on: ubuntu-latest + name: ARM Build + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push arm64 standard build + uses: docker/build-push-action@v5 + with: + context: . + file: docker/main/Dockerfile + push: true + platforms: linux/arm64 + target: frigate + tags: | + ${{ steps.setup.outputs.image-name }}-standard-arm64 + cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 + - name: Build and push RPi build + uses: docker/bake-action@v4 + with: + push: true + targets: rpi + files: docker/rpi/rpi.hcl + set: | + rpi.tags=${{ steps.setup.outputs.image-name }}-rpi + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max + - name: Build and push RockChip build + uses: docker/bake-action@v3 + with: + push: true + targets: rk + files: docker/rockchip/rk.hcl + set: | + rk.tags=${{ steps.setup.outputs.image-name }}-rk + *.cache-from=type=gha + jetson_jp4_build: + runs-on: ubuntu-latest + name: Jetson Jetpack 4 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} + - name: Build and push TensorRT (Jetson, Jetpack 4) + env: + ARCH: arm64 + BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest + SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest + TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp4 + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max + jetson_jp5_build: + runs-on: ubuntu-latest + name: Jetson Jetpack 5 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push TensorRT (Jetson, Jetpack 5) + env: + ARCH: arm64 + BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime + SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime + TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5 + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max + # The majority of users running arm64 are rpi users, so the rpi + # build should be the primary arm64 image + assemble_default_build: + runs-on: ubuntu-latest + name: Assemble and push default build + needs: + - amd64_build + - arm64_build steps: - - name: Remove unnecessary files - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc - id: lowercaseRepo - uses: ASzc/change-string-case-action@v5 + uses: ASzc/change-string-case-action@v6 with: string: ${{ github.repository }} - - name: Check out code - uses: 
actions/checkout@v3 - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - name: Log in to the Container registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Create version file - run: make version - name: Create short sha run: echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV - - name: Build and push - uses: docker/build-push-action@v4 + - uses: int128/docker-manifest-create-action@v1 with: - context: . - push: true - platforms: linux/amd64,linux/arm64 - target: frigate - tags: | - ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }} - cache-from: type=gha - cache-to: type=gha,mode=max - - name: Build and push TensorRT - uses: docker/build-push-action@v4 - with: - context: . - push: true - platforms: linux/amd64 - target: frigate-tensorrt - tags: | - ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-tensorrt - cache-from: type=gha + tags: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }} + suffixes: | + -amd64 + -rpi diff --git a/.github/workflows/maintain_cache.yml b/.github/workflows/maintain_cache.yml deleted file mode 100644 index 30cb3de80..000000000 --- a/.github/workflows/maintain_cache.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Maintain Cache - -on: - schedule: - - cron: "13 0 * * 0,4" - -env: - PYTHON_VERSION: 3.9 - -jobs: - multi_arch_build: - runs-on: ubuntu-latest - name: Image Build - steps: - - name: Remove unnecessary files - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc - - id: lowercaseRepo - uses: ASzc/change-string-case-action@v5 - with: - string: ${{ github.repository }} - - name: Check out 
code - uses: actions/checkout@v3 - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Log in to the Container registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Create version file - run: make version - - name: Build and push - uses: docker/build-push-action@v4 - with: - context: . - push: false - platforms: linux/amd64,linux/arm64 - target: frigate - cache-from: type=gha diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 7e97cfb6c..4b2d5fb4c 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -15,7 +15,7 @@ jobs: env: DOCKER_BUILDKIT: "1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -34,7 +34,7 @@ jobs: name: Web - Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -48,7 +48,7 @@ jobs: name: Web - Test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -63,22 +63,19 @@ jobs: name: Python Checks steps: - name: Check out the repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python ${{ env.DEFAULT_PYTHON }} - uses: actions/setup-python@v4.6.1 + uses: actions/setup-python@v4.7.1 with: python-version: ${{ env.DEFAULT_PYTHON }} - name: Install requirements run: | python3 -m pip install -U pip - python3 -m pip install -r requirements-dev.txt - - name: Check black + python3 -m pip install -r docker/main/requirements-dev.txt + - name: Check formatting run: | - black --check --diff frigate migrations docker *.py - - name: Check isort - run: | - isort --check 
--diff frigate migrations docker *.py - - name: Check ruff + ruff format --check --diff frigate migrations docker *.py + - name: Check lint run: | ruff check frigate migrations docker *.py @@ -87,7 +84,7 @@ jobs: name: Python Tests steps: - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -97,9 +94,9 @@ jobs: run: npm run build working-directory: ./web - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build run: make - name: Run mypy diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..3eb9785d9 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,37 @@ +name: On release + +on: + workflow_dispatch: + release: + types: [published] + +jobs: + release: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - id: lowercaseRepo + uses: ASzc/change-string-case-action@v6 + with: + string: ${{ github.repository }} + - name: Log in to the Container registry + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Create tag variables + run: | + BRANCH=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "master" || echo "dev") + echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV + echo "BUILD_TAG=${BRANCH}-${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV + - name: Tag and push the main image + run: | + VERSION_TAG=${BASE}:${CLEAN_VERSION} + PULL_TAG=${BASE}:${BUILD_TAG} + docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json 
--multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG} + for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do + docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant} + done diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..48b26a359 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,6 @@ +# Community-supported boards +/docker/tensorrt/ @madsciencetist @NateMeyer +/docker/tensorrt/*arm64* @madsciencetist +/docker/tensorrt/*jetson* @madsciencetist + +/docker/rockchip/ @MarcA711 diff --git a/Makefile b/Makefile index 9035a58f4..2cd831670 100644 --- a/Makefile +++ b/Makefile @@ -3,31 +3,34 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) VERSION = 0.13.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate +GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) CURRENT_UID := $(shell id -u) CURRENT_GID := $(shell id -g) +BOARDS= #Initialized empty + +include docker/*/*.mk + +build-boards: $(BOARDS:%=build-%) + +push-boards: $(BOARDS:%=push-%) version: echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py local: version - docker buildx build --target=frigate --tag frigate:latest --load . - -local-trt: version - docker buildx build --target=frigate-tensorrt --tag frigate:latest-tensorrt --load . + docker buildx build --target=frigate --tag frigate:latest --load --file docker/main/Dockerfile . amd64: - docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . - docker buildx build --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH)-tensorrt . + docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . 
arm64: - docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . + docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . build: version amd64 arm64 - docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . + docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . -push: build - docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) . - docker buildx build --push --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt . +push: push-boards + docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) --file docker/main/Dockerfile . run: local docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest diff --git a/docker-compose.yml b/docker-compose.yml index cf3613484..a4d349194 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,15 +11,19 @@ services: shm_size: "256mb" build: context: . 
+ dockerfile: docker/main/Dockerfile # Use target devcontainer-trt for TensorRT dev target: devcontainer - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [gpu] + ## Uncomment this block for nvidia gpu support + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: [gpu] + environment: + YOLO_MODELS: yolov7-320 devices: - /dev/bus/usb:/dev/bus/usb # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware @@ -29,8 +33,6 @@ services: - /etc/localtime:/etc/localtime:ro - ./config:/config - ./debug:/media/frigate - # Create the trt-models folder using the documented method of generating TRT models - # - ./debug/trt-models:/trt-models - /dev/bus/usb:/dev/bus/usb mqtt: container_name: mqtt diff --git a/Dockerfile b/docker/main/Dockerfile similarity index 66% rename from Dockerfile rename to docker/main/Dockerfile index 660cb5b25..e35eac191 100644 --- a/Dockerfile +++ b/docker/main/Dockerfile @@ -1,13 +1,16 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile:1.6 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive -FROM debian:11 AS base +ARG BASE_IMAGE=debian:11 +ARG SLIM_BASE=debian:11-slim -FROM --platform=linux/amd64 debian:11 AS base_amd64 +FROM ${BASE_IMAGE} AS base -FROM debian:11-slim AS slim-base +FROM --platform=${BUILDPLATFORM} debian:11 AS base_host + +FROM ${SLIM_BASE} AS slim-base FROM slim-base AS wget ARG DEBIAN_FRONTEND @@ -23,15 +26,14 @@ ENV CCACHE_MAXSIZE 2G # bind /var/cache/apt to tmpfs to speed up nginx build RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ - --mount=type=bind,source=docker/build_nginx.sh,target=/deps/build_nginx.sh \ + --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \ --mount=type=cache,target=/root/.ccache \ /deps/build_nginx.sh -FROM wget AS go2rtc +FROM scratch AS go2rtc ARG 
TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.5.0/go2rtc_linux_${TARGETARCH}" \ - && chmod +x go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.8.4/go2rtc_linux_${TARGETARCH}" go2rtc #### @@ -43,11 +45,11 @@ RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.5.0/ # #### # Download and Convert OpenVino model -FROM base_amd64 AS ov-converter +FROM base_host AS ov-converter ARG DEBIAN_FRONTEND # Install OpenVino Runtime and Dev library -COPY requirements-ov.txt /requirements-ov.txt +COPY docker/main/requirements-ov.txt /requirements-ov.txt RUN apt-get -qq update \ && apt-get -qq install -y wget python3 python3-distutils \ && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ @@ -69,22 +71,22 @@ ENV CCACHE_MAXSIZE 2G # Build libUSB without udev. Needed for Openvino NCS2 support WORKDIR /opt -RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache -RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \ - unzip v1.0.25.zip && cd libusb-1.0.25 && \ +RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config +RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \ + unzip v1.0.26.zip && cd libusb-1.0.26 && \ ./bootstrap.sh && \ ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \ make -j $(nproc --all) RUN apt-get update && \ apt-get install -y --no-install-recommends libusb-1.0-0-dev && \ rm -rf /var/lib/apt/lists/* -WORKDIR /opt/libusb-1.0.25/libusb +WORKDIR /opt/libusb-1.0.26/libusb RUN /bin/mkdir -p '/usr/local/lib' && \ /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \ /bin/mkdir -p '/usr/local/include/libusb-1.0' && \ /usr/bin/install -c -m 644 
libusb.h '/usr/local/include/libusb-1.0' && \ /bin/mkdir -p '/usr/local/lib/pkgconfig' && \ - cd /opt/libusb-1.0.25/ && \ + cd /opt/libusb-1.0.26/ && \ /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ ldconfig @@ -105,7 +107,7 @@ COPY audio-labelmap.txt . FROM wget AS s6-overlay ARG TARGETARCH -RUN --mount=type=bind,source=docker/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \ +RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \ /deps/install_s6_overlay.sh @@ -119,13 +121,15 @@ RUN apt-get -qq update \ apt-transport-https \ gnupg \ wget \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9165938D90FDDD2E \ - && echo "deb http://raspbian.raspberrypi.org/raspbian/ bullseye main contrib non-free rpi" | tee /etc/apt/sources.list.d/raspi.list \ + # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html + && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \ + gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \ + tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \ && apt-get -qq update \ && apt-get -qq install -y \ - python3 \ - python3-dev \ - wget \ + python3.9 \ + python3.9-dev \ # opencv dependencies build-essential cmake git pkg-config libgtk-3-dev \ libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \ @@ -134,28 +138,20 @@ RUN apt-get -qq update \ libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \ libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \ # scipy dependencies - gcc gfortran libopenblas-dev liblapack-dev \ - # faster-fifo dependencies - g++ cython3 && \ + gcc gfortran libopenblas-dev liblapack-dev && \ rm -rf /var/lib/apt/lists/* +# Ensure python3 defaults to python3.9 +RUN update-alternatives 
--install /usr/bin/python3 python3 /usr/bin/python3.9 1 + RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ && python3 get-pip.py "pip" -COPY requirements.txt /requirements.txt -RUN pip3 install -r requirements.txt +COPY docker/main/requirements.txt /requirements.txt +RUN pip3 install -r /requirements.txt -COPY requirements-wheels.txt /requirements-wheels.txt -RUN pip3 wheel --wheel-dir=/wheels -r requirements-wheels.txt - -# Make this a separate target so it can be built/cached optionally -FROM wheels as trt-wheels -ARG DEBIAN_FRONTEND -ARG TARGETARCH - -# Add TensorRT wheels to another folder -COPY requirements-tensorrt.txt /requirements-tensorrt.txt -RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r requirements-tensorrt.txt +COPY docker/main/requirements-wheels.txt /requirements-wheels.txt +RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt # Collect deps in a single layer @@ -165,7 +161,7 @@ COPY --from=go2rtc /rootfs/ / COPY --from=libusb-build /usr/local/lib /usr/local/lib COPY --from=s6-overlay /rootfs/ / COPY --from=models /rootfs/ / -COPY docker/rootfs/ / +COPY docker/main/rootfs/ / # Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc) @@ -183,10 +179,11 @@ ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" ENV PATH="/usr/lib/btbn-ffmpeg/bin:/usr/local/go2rtc/bin:/usr/local/nginx/sbin:${PATH}" # Install dependencies -RUN --mount=type=bind,source=docker/install_deps.sh,target=/deps/install_deps.sh \ +RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \ /deps/install_deps.sh RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ + python3 -m pip install --upgrade pip && \ pip3 install -U /deps/wheels/*.whl COPY --from=deps-rootfs / / @@ -204,24 +201,27 @@ ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T" ENTRYPOINT ["/init"] CMD [] +HEALTHCHECK --start-period=120s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ + CMD curl --fail --silent 
--show-error http://127.0.0.1:5000/api/version || exit 1 + # Frigate deps with Node.js and NPM for devcontainer FROM deps AS devcontainer # Do not start the actual Frigate service on devcontainer as it will be started by VSCode # But start a fake service for simulating the logs -COPY docker/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run +COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run # Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it RUN mkdir -p /opt/frigate \ && ln -svf /workspace/frigate/frigate /opt/frigate/frigate -# Install Node 16 -RUN apt-get update \ - && apt-get install wget -y \ - && wget -qO- https://deb.nodesource.com/setup_16.x | bash - \ - && apt-get install -y nodejs \ +# Install Node 20 +RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \ + chmod 500 nsolid_setup_deb.sh && \ + ./nsolid_setup_deb.sh 20 && \ + apt-get install nodejs -y \ && rm -rf /var/lib/apt/lists/* \ - && npm install -g npm@9 + && npm install -g npm@10 WORKDIR /workspace/frigate @@ -229,7 +229,7 @@ RUN apt-get update \ && apt-get install make -y \ && rm -rf /var/lib/apt/lists/* -RUN --mount=type=bind,source=./requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \ +RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \ pip3 install -r requirements-dev.txt CMD ["sleep", "infinity"] @@ -261,36 +261,3 @@ FROM deps AS frigate WORKDIR /opt/frigate/ COPY --from=rootfs / / - -# Build TensorRT-specific library -FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps - -RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \ - /tensorrt_libyolo.sh - -# Frigate w/ TensorRT Support as separate image -FROM frigate AS frigate-tensorrt - -#Disable S6 Global timeout -ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 - -ENV TRT_VER=8.5.3 -ENV YOLO_MODELS="yolov7-tiny-416" - -COPY --from=trt-deps 
/usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so -COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos -COPY docker/support/tensorrt_detector/rootfs/ / - -RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl && \ - ldconfig - -# Dev Container w/ TRT -FROM devcontainer AS devcontainer-trt - -COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so -COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos -COPY docker/support/tensorrt_detector/rootfs/ / -COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so -RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl diff --git a/docker/build_nginx.sh b/docker/main/build_nginx.sh similarity index 99% rename from docker/build_nginx.sh rename to docker/main/build_nginx.sh index 56c9a146d..fd604c122 100755 --- a/docker/build_nginx.sh +++ b/docker/main/build_nginx.sh @@ -2,7 +2,7 @@ set -euxo pipefail -NGINX_VERSION="1.25.1" +NGINX_VERSION="1.25.3" VOD_MODULE_VERSION="1.31" SECURE_TOKEN_MODULE_VERSION="1.5" RTMP_MODULE_VERSION="1.2.2" diff --git a/docker/fake_frigate_run b/docker/main/fake_frigate_run similarity index 100% rename from docker/fake_frigate_run rename to docker/main/fake_frigate_run diff --git a/docker/install_deps.sh b/docker/main/install_deps.sh similarity index 70% rename from docker/install_deps.sh rename to docker/main/install_deps.sh index 7d5242d83..43fff479b 100755 --- a/docker/install_deps.sh +++ b/docker/main/install_deps.sh @@ -10,11 +10,15 @@ apt-get -qq install --no-install-recommends -y \ wget \ procps vainfo \ unzip locales tzdata libxml2 xz-utils \ + python3.9 \ python3-pip \ curl \ jq \ nethogs +# ensure python3 defaults to python3.9 +update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 + mkdir -p -m 600 
/root/.gnupg # add coral repo @@ -23,8 +27,10 @@ curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \ echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections -# enable non-free repo -sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list +# enable non-free repo in Debian +if grep -q "Debian" /etc/issue; then + sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list +fi # coral drivers apt-get -qq update @@ -41,26 +47,24 @@ fi # ffmpeg -> arm64 if [[ "${TARGETARCH}" == "arm64" ]]; then - # add raspberry pi repo - gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E - echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list - apt-get -qq update - apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg + mkdir -p /usr/lib/btbn-ffmpeg + wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz" + tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1 + rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay fi # arch specific packages if [[ "${TARGETARCH}" == "amd64" ]]; then - # Use debian testing repo only for hwaccel packages - echo 'deb http://deb.debian.org/debian testing main non-free' >/etc/apt/sources.list.d/debian-testing.list + # use debian bookworm for hwaccel packages + echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list apt-get -qq update - # intel-opencl-icd specifically for GPU support in OpenVino apt-get -qq install --no-install-recommends --no-install-suggests -y 
\ intel-opencl-icd \ - mesa-va-drivers libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 radeontop intel-gpu-tools + mesa-va-drivers radeontop libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 intel-gpu-tools # something about this dependency requires it to be installed in a separate call rather than in the line above apt-get -qq install --no-install-recommends --no-install-suggests -y \ i965-va-driver-shaders - rm -f /etc/apt/sources.list.d/debian-testing.list + rm -f /etc/apt/sources.list.d/debian-bookworm.list fi if [[ "${TARGETARCH}" == "arm64" ]]; then diff --git a/docker/install_s6_overlay.sh b/docker/main/install_s6_overlay.sh similarity index 95% rename from docker/install_s6_overlay.sh rename to docker/main/install_s6_overlay.sh index 4e858ef07..75acba774 100755 --- a/docker/install_s6_overlay.sh +++ b/docker/main/install_s6_overlay.sh @@ -2,7 +2,7 @@ set -euxo pipefail -s6_version="3.1.4.1" +s6_version="3.1.5.0" if [[ "${TARGETARCH}" == "amd64" ]]; then s6_arch="x86_64" diff --git a/docker/main/requirements-dev.txt b/docker/main/requirements-dev.txt new file mode 100644 index 000000000..af3ee5763 --- /dev/null +++ b/docker/main/requirements-dev.txt @@ -0,0 +1 @@ +ruff diff --git a/docker/main/requirements-ov.txt b/docker/main/requirements-ov.txt new file mode 100644 index 000000000..20e5a29c1 --- /dev/null +++ b/docker/main/requirements-ov.txt @@ -0,0 +1,5 @@ +numpy +# Openvino Library - Custom built with MYRIAD support +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' +openvino-dev[tensorflow2] @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino_dev-2022.3.1-1-py3-none-any.whl diff 
--git a/requirements-wheels.txt b/docker/main/requirements-wheels.txt similarity index 58% rename from requirements-wheels.txt rename to docker/main/requirements-wheels.txt index 3232e8f31..f4167744e 100644 --- a/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -1,29 +1,29 @@ click == 8.1.* Flask == 2.3.* -faster-fifo == 1.4.* imutils == 0.5.* matplotlib == 3.7.* -mypy == 1.4.1 +mypy == 1.6.1 numpy == 1.23.* onvif_zeep == 0.2.12 opencv-python-headless == 4.7.0.* paho-mqtt == 1.6.* -peewee == 3.16.* -peewee_migrate == 1.11.* +peewee == 3.17.* +peewee_migrate == 1.12.* psutil == 5.9.* pydantic == 1.10.* git+https://github.com/fbcotter/py3nvml#egg=py3nvml -PyYAML == 6.0 -pytz == 2023.3 -ruamel.yaml == 0.17.* -tzlocal == 5.0.* +PyYAML == 6.0.* +pytz == 2023.3.post1 +ruamel.yaml == 0.18.* +tzlocal == 5.2 types-PyYAML == 6.0.* requests == 2.31.* types-requests == 2.31.* -scipy == 1.10.* +scipy == 1.11.* norfair == 2.2.* setproctitle == 1.3.* ws4py == 0.5.* +unidecode == 1.3.* # Openvino Library - Custom built with MYRIAD support -openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' -openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' diff --git a/requirements.txt b/docker/main/requirements.txt similarity index 100% rename from requirements.txt rename to docker/main/requirements.txt diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for 
b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish similarity index 100% rename from 
docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish diff --git 
a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare diff --git 
a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run similarity index 86% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run rename to 
docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run index fd5fcb568..851d78799 100755 --- a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run @@ -45,8 +45,13 @@ function get_ip_and_port_from_supervisor() { export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') +if [[ -f "/dev/shm/go2rtc.yaml" ]]; then + echo "[INFO] Removing stale config from last run..." + rm /dev/shm/go2rtc.yaml +fi + if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then - echo "[INFO] Preparing go2rtc config..." + echo "[INFO] Preparing new go2rtc config..." if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then # Running as a Home Assistant add-on, infer the IP address and port @@ -54,6 +59,8 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then fi python3 /usr/local/go2rtc/create_config.py +else + echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually." 
fi readonly config_path="/config" diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for similarity index 100% rename from 
docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish diff --git 
a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline similarity index 100% rename from 
docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/main/rootfs/usr/local/go2rtc/create_config.py similarity index 59% rename from docker/rootfs/usr/local/go2rtc/create_config.py rename to docker/main/rootfs/usr/local/go2rtc/create_config.py index 0531b173d..51d75f0e0 100644 --- a/docker/rootfs/usr/local/go2rtc/create_config.py +++ b/docker/main/rootfs/usr/local/go2rtc/create_config.py @@ -3,6 +3,7 @@ import json import os import sys +from pathlib import Path import yaml @@ -16,6 +17,14 @@ sys.path.remove("/opt/frigate") FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} +# read docker secret files as env vars too +if os.path.isdir("/run/secrets"): + for secret_file in os.listdir("/run/secrets"): + if secret_file.startswith("FRIGATE_"): + FRIGATE_ENV_VARS[secret_file] = Path( + os.path.join("/run/secrets", secret_file) + ).read_text() + config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") # Check if we can use .yaml instead of .yml @@ -39,13 +48,25 @@ if go2rtc_config.get("api") is None: elif go2rtc_config["api"].get("origin") is None: go2rtc_config["api"]["origin"] = "*" +# Need to set default location for HA config +if go2rtc_config.get("hass") is None: + go2rtc_config["hass"] = {"config": "/config"} + # we want to ensure that logs are easy to read if go2rtc_config.get("log") is None: go2rtc_config["log"] = {"format": "text"} elif go2rtc_config["log"].get("format") is None: go2rtc_config["log"]["format"] = "text" -if not go2rtc_config.get("webrtc", {}).get("candidates", []): +# ensure there is a default webrtc config +if not go2rtc_config.get("webrtc"): + go2rtc_config["webrtc"] = {} + +# go2rtc should listen on 8555 tcp & udp by default +if not go2rtc_config["webrtc"].get("listen"): + go2rtc_config["webrtc"]["listen"] = ":8555" + +if not 
go2rtc_config["webrtc"].get("candidates", []): default_candidates = [] # use internal candidate if it was discovered when running through the add-on internal_candidate = os.environ.get( @@ -67,8 +88,19 @@ else: # as source for frigate and the integration supports HLS playback if go2rtc_config.get("rtsp") is None: go2rtc_config["rtsp"] = {"default_query": "mp4"} -elif go2rtc_config["rtsp"].get("default_query") is None: - go2rtc_config["rtsp"]["default_query"] = "mp4" +else: + if go2rtc_config["rtsp"].get("default_query") is None: + go2rtc_config["rtsp"]["default_query"] = "mp4" + + if go2rtc_config["rtsp"].get("username") is not None: + go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format( + **FRIGATE_ENV_VARS + ) + + if go2rtc_config["rtsp"].get("password") is not None: + go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format( + **FRIGATE_ENV_VARS + ) # need to replace ffmpeg command when using ffmpeg4 if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59: @@ -81,16 +113,43 @@ if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59: "rtsp" ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" +# add hardware acceleration presets for rockchip devices +# may be removed if frigate uses a go2rtc version that includes these presets +if go2rtc_config.get("ffmpeg") is None: + go2rtc_config["ffmpeg"] = { + "h264/rk": "-c:v h264_rkmpp_encoder -g 50 -bf 0", + "h265/rk": "-c:v hevc_rkmpp_encoder -g 50 -bf 0", + } +else: + if go2rtc_config["ffmpeg"].get("h264/rk") is None: + go2rtc_config["ffmpeg"]["h264/rk"] = "-c:v h264_rkmpp_encoder -g 50 -bf 0" + + if go2rtc_config["ffmpeg"].get("h265/rk") is None: + go2rtc_config["ffmpeg"]["h265/rk"] = "-c:v hevc_rkmpp_encoder -g 50 -bf 0" + for name in go2rtc_config.get("streams", {}): stream = go2rtc_config["streams"][name] if isinstance(stream, str): - go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format( - 
**FRIGATE_ENV_VARS - ) + try: + go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format( + **FRIGATE_ENV_VARS + ) + except KeyError as e: + print( + "[ERROR] Invalid substitution found, see https://docs.frigate.video/configuration/restream#advanced-restream-configurations for more info." + ) + sys.exit(e) + elif isinstance(stream, list): for i, stream in enumerate(stream): - go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS) + try: + go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS) + except KeyError as e: + print( + "[ERROR] Invalid substitution found, see https://docs.frigate.video/configuration/restream#advanced-restream-configurations for more info." + ) + sys.exit(e) # add birdseye restream stream if enabled if config.get("birdseye", {}).get("restream", False): diff --git a/docker/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf similarity index 57% rename from docker/rootfs/usr/local/nginx/conf/nginx.conf rename to docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 35703fb2c..1d3b80de3 100644 --- a/docker/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -32,6 +32,13 @@ http { gzip_proxied no-cache no-store private expired auth; gzip_vary on; + proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off; + + map $sent_http_content_type $should_not_cache { + 'application/json' 0; + default 1; + } + upstream frigate_api { server 127.0.0.1:5001; keepalive 1024; @@ -93,10 +100,6 @@ http { secure_token $args; secure_token_types application/vnd.apple.mpegurl; - add_header Access-Control-Allow-Headers '*'; - add_header Access-Control-Expose-Headers 'Server,range,Content-Length,Content-Range'; - add_header Access-Control-Allow-Methods 'GET, HEAD, OPTIONS'; - add_header Access-Control-Allow-Origin '*'; add_header Cache-Control "no-store"; expires off; } @@ -104,16 
+107,6 @@ http { location /stream/ { add_header Cache-Control "no-store"; expires off; - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } types { application/dash+xml mpd; @@ -126,16 +119,6 @@ http { } location /clips/ { - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } types { video/mp4 mp4; @@ -152,17 +135,6 @@ http { } location /recordings/ { - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - types { video/mp4 mp4; } @@ -173,17 +145,6 @@ http { } location /exports/ { - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain 
charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } - types { video/mp4 mp4; } @@ -195,68 +156,69 @@ http { location /ws { proxy_pass http://mqtt_ws/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } location /live/jsmpeg/ { proxy_pass http://jsmpeg/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } location /live/mse/ { proxy_pass http://go2rtc/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } location /live/webrtc/ { proxy_pass http://go2rtc/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } location ~* /api/go2rtc([/]?.*)$ { proxy_pass http://go2rtc; rewrite ^/api/go2rtc(.*)$ /api$1 break; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } location ~* /api/.*\.(jpg|jpeg|png)$ { - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS'; rewrite ^/api/(.*)$ $1 break; proxy_pass http://frigate_api; - proxy_pass_request_headers on; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; + include proxy.conf; } location /api/ { add_header Cache-Control "no-store"; expires off; - - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range'; 
proxy_pass http://frigate_api/; - proxy_pass_request_headers on; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; + include proxy.conf; + + proxy_cache api_cache; + proxy_cache_lock on; + proxy_cache_use_stale updating; + proxy_cache_valid 200 5s; + proxy_cache_bypass $http_x_cache_bypass; + proxy_no_cache $should_not_cache; + add_header X-Cache-Status $upstream_cache_status; + + location /api/vod/ { + proxy_pass http://frigate_api/vod/; + include proxy.conf; + proxy_cache off; + } + + location /api/stats { + access_log off; + rewrite ^/api/(.*)$ $1 break; + proxy_pass http://frigate_api; + include proxy.conf; + } + + location /api/version { + access_log off; + rewrite ^/api/(.*)$ $1 break; + proxy_pass http://frigate_api; + include proxy.conf; + } } location / { @@ -299,4 +261,4 @@ rtmp { meta copy; } } -} \ No newline at end of file +} diff --git a/docker/main/rootfs/usr/local/nginx/conf/proxy.conf b/docker/main/rootfs/usr/local/nginx/conf/proxy.conf new file mode 100644 index 000000000..442c78718 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/conf/proxy.conf @@ -0,0 +1,4 @@ +proxy_http_version 1.1; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection "Upgrade"; +proxy_set_header Host $host; \ No newline at end of file diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile new file mode 100644 index 000000000..b27e4f223 --- /dev/null +++ b/docker/rockchip/Dockerfile @@ -0,0 +1,32 @@ +# syntax=docker/dockerfile:1.6 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +FROM wheels as rk-wheels +COPY docker/main/requirements-wheels.txt /requirements-wheels.txt +COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt +RUN sed -i "/https:\/\//d" /requirements-wheels.txt +RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r 
/requirements-wheels-rk.txt + +FROM deps AS rk-deps +ARG TARGETARCH + +RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \ + pip3 install -U /deps/rk-wheels/*.whl + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / + +ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/ +ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/ + +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn /models/rknn/ + +RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg +RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe +ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffmpeg /usr/lib/btbn-ffmpeg/bin/ +ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffprobe /usr/lib/btbn-ffmpeg/bin/ diff --git a/docker/rockchip/requirements-wheels-rk.txt b/docker/rockchip/requirements-wheels-rk.txt new file mode 100644 index 000000000..9a3fe5c77 --- /dev/null +++ b/docker/rockchip/requirements-wheels-rk.txt @@ -0,0 +1,2 @@ +hide-warnings == 0.17 +rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v1.5.2/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl \ No newline at end of file diff --git a/docker/rockchip/rk.hcl b/docker/rockchip/rk.hcl new file mode 100644 index 000000000..513fefa25 --- /dev/null +++ b/docker/rockchip/rk.hcl @@ -0,0 +1,34 @@ +target wget { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "wget" +} + +target wheels { + dockerfile = 
"docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "wheels" +} + +target deps { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "deps" +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "rootfs" +} + +target rk { + dockerfile = "docker/rockchip/Dockerfile" + contexts = { + wget = "target:wget", + wheels = "target:wheels", + deps = "target:deps", + rootfs = "target:rootfs" + } + platforms = ["linux/arm64"] +} \ No newline at end of file diff --git a/docker/rockchip/rk.mk b/docker/rockchip/rk.mk new file mode 100644 index 000000000..0d9bde16a --- /dev/null +++ b/docker/rockchip/rk.mk @@ -0,0 +1,10 @@ +BOARDS += rk + +local-rk: version + docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk + +build-rk: version + docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk + +push-rk: build-rk + docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk \ No newline at end of file diff --git a/docker/rpi/Dockerfile b/docker/rpi/Dockerfile new file mode 100644 index 000000000..581ca7ff8 --- /dev/null +++ b/docker/rpi/Dockerfile @@ -0,0 +1,16 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +FROM deps AS rpi-deps +ARG TARGETARCH + +RUN rm -rf /usr/lib/btbn-ffmpeg/ + +# Install dependencies +RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \ + /deps/install_deps.sh + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / diff --git a/docker/rpi/install_deps.sh b/docker/rpi/install_deps.sh new file mode 100755 index 000000000..9716623ca --- /dev/null +++ b/docker/rpi/install_deps.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -euxo pipefail + +apt-get -qq update + +apt-get -qq install 
--no-install-recommends -y \ + apt-transport-https \ + gnupg \ + wget \ + procps vainfo \ + unzip locales tzdata libxml2 xz-utils \ + python3-pip \ + curl \ + jq \ + nethogs + +mkdir -p -m 600 /root/.gnupg + +# enable non-free repo +sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list + +# ffmpeg -> arm64 +if [[ "${TARGETARCH}" == "arm64" ]]; then + # add raspberry pi repo + gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E + echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list + apt-get -qq update + apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg +fi diff --git a/docker/rpi/rpi.hcl b/docker/rpi/rpi.hcl new file mode 100644 index 000000000..66f97c16d --- /dev/null +++ b/docker/rpi/rpi.hcl @@ -0,0 +1,20 @@ +target deps { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "deps" +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "rootfs" +} + +target rpi { + dockerfile = "docker/rpi/Dockerfile" + contexts = { + deps = "target:deps", + rootfs = "target:rootfs" + } + platforms = ["linux/arm64"] +} \ No newline at end of file diff --git a/docker/rpi/rpi.mk b/docker/rpi/rpi.mk new file mode 100644 index 000000000..c1282b011 --- /dev/null +++ b/docker/rpi/rpi.mk @@ -0,0 +1,10 @@ +BOARDS += rpi + +local-rpi: version + docker buildx bake --load --file=docker/rpi/rpi.hcl --set rpi.tags=frigate:latest-rpi rpi + +build-rpi: version + docker buildx bake --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi + +push-rpi: build-rpi + docker buildx bake --push --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi \ No newline at end of file diff --git 
a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run deleted file mode 100755 index 5f0e43553..000000000 --- a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run +++ /dev/null @@ -1,53 +0,0 @@ -#!/command/with-contenv bash -# shellcheck shell=bash -# Generate models for the TensorRT detector - -set -o errexit -o nounset -o pipefail - -MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"} -OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}" - -# Create output folder -mkdir -p ${OUTPUT_FOLDER} - -FIRST_MODEL=true -MODEL_CONVERT="" - -for model in ${YOLO_MODELS//,/ } -do - # Remove old link in case path/version changed - rm -f ${MODEL_CACHE_DIR}/${model}.trt - - if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then - if [[ ${FIRST_MODEL} = true ]]; then - MODEL_CONVERT="${model}" - FIRST_MODEL=false; - else - MODEL_CONVERT+=",${model}"; - fi - else - ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt - fi -done - -if [[ -z ${MODEL_CONVERT} ]]; then - echo "No models to convert." 
- exit 0 -fi - -echo "Generating the following TRT Models: ${MODEL_CONVERT}" - -# Build trt engine -cd /usr/local/src/tensorrt_demos/yolo - -# Download yolo weights -./download_yolo.sh $MODEL_CONVERT > /dev/null - -for model in ${MODEL_CONVERT//,/ } -do - echo "Converting ${model} model" - python3 yolo_to_onnx.py -m ${model} > /dev/null - python3 onnx_to_tensorrt.py -m ${model} > /dev/null - cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt - ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt -done diff --git a/docker/tensorrt/Dockerfile.amd64 b/docker/tensorrt/Dockerfile.amd64 new file mode 100644 index 000000000..075726eda --- /dev/null +++ b/docker/tensorrt/Dockerfile.amd64 @@ -0,0 +1,32 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +# Make this a separate target so it can be built/cached optionally +FROM wheels as trt-wheels +ARG DEBIAN_FRONTEND +ARG TARGETARCH + +# Add TensorRT wheels to another folder +COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt +RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt + +FROM tensorrt-base AS frigate-tensorrt +ENV TRT_VER=8.5.3 +RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ + pip3 install -U /deps/trt-wheels/*.whl && \ + ldconfig + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / + +# Dev Container w/ TRT +FROM devcontainer AS devcontainer-trt + +COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so +COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos +COPY docker/tensorrt/detector/rootfs/ / +COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so +RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ + pip3 install -U /deps/trt-wheels/*.whl diff --git a/docker/tensorrt/Dockerfile.arm64 
b/docker/tensorrt/Dockerfile.arm64 new file mode 100644 index 000000000..70184bf9b --- /dev/null +++ b/docker/tensorrt/Dockerfile.arm64 @@ -0,0 +1,79 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +ARG BASE_IMAGE +FROM ${BASE_IMAGE} AS build-wheels +ARG DEBIAN_FRONTEND + +# Use a separate container to build wheels to prevent build dependencies in final image +RUN apt-get -qq update \ + && apt-get -qq install -y --no-install-recommends \ + python3.9 python3.9-dev \ + wget build-essential cmake git \ + && rm -rf /var/lib/apt/lists/* + +# Ensure python3 defaults to python3.9 +RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 + +RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && python3 get-pip.py "pip" + + +FROM build-wheels AS trt-wheels +ARG DEBIAN_FRONTEND +ARG TARGETARCH + +# python-tensorrt build deps are 3.4 GB! +RUN apt-get update \ + && apt-get install -y ccache cuda-cudart-dev-* cuda-nvcc-* libnvonnxparsers-dev libnvparsers-dev libnvinfer-plugin-dev \ + && ([ -e /usr/local/cuda ] || ln -s /usr/local/cuda-* /usr/local/cuda) \ + && rm -rf /var/lib/apt/lists/*; + +# Determine version of tensorrt already installed in base image, e.g. 
"Version: 8.4.1-1+cuda11.4" +RUN NVINFER_VER=$(dpkg -s libnvinfer8 | grep -Po "Version: \K.*") \ + && echo $NVINFER_VER | grep -Po "^\d+\.\d+\.\d+" > /etc/TENSORRT_VER + +RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,target=/deps/build_python_tensorrt.sh \ + --mount=type=cache,target=/root/.ccache \ + export PATH="/usr/lib/ccache:$PATH" CCACHE_DIR=/root/.ccache CCACHE_MAXSIZE=2G \ + && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh + +COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt +RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt + +FROM build-wheels AS trt-model-wheels +ARG DEBIAN_FRONTEND + +RUN apt-get update \ + && apt-get install -y protobuf-compiler libprotobuf-dev \ + && rm -rf /var/lib/apt/lists/* +RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \ + pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt + +FROM wget AS jetson-ffmpeg +ARG DEBIAN_FRONTEND +ENV CCACHE_DIR /root/.ccache +ENV CCACHE_MAXSIZE 2G +RUN --mount=type=bind,source=docker/tensorrt/build_jetson_ffmpeg.sh,target=/deps/build_jetson_ffmpeg.sh \ + --mount=type=cache,target=/root/.ccache \ + /deps/build_jetson_ffmpeg.sh + +# Frigate w/ TensorRT for NVIDIA Jetson platforms +FROM tensorrt-base AS frigate-tensorrt +RUN apt-get update \ + && apt-get install -y python-is-python3 libprotobuf17 \ + && rm -rf /var/lib/apt/lists/* + +RUN rm -rf /usr/lib/btbn-ffmpeg/ +COPY --from=jetson-ffmpeg /rootfs / + +COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER +RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ + --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \ + pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \ + && ldconfig + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / diff --git 
a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base new file mode 100644 index 000000000..b0015016d --- /dev/null +++ b/docker/tensorrt/Dockerfile.base @@ -0,0 +1,29 @@ +# syntax=docker/dockerfile:1.6 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3 + +# Build TensorRT-specific library +FROM ${TRT_BASE} AS trt-deps + +RUN apt-get update \ + && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \ + && rm -rf /var/lib/apt/lists/* +RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \ + /tensorrt_libyolo.sh + +# Frigate w/ TensorRT Support as separate image +FROM deps AS tensorrt-base + +#Disable S6 Global timeout +ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 + +COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so +COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos +COPY docker/tensorrt/detector/rootfs/ / +ENV YOLO_MODELS="yolov7-320" + +HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ + CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 diff --git a/docker/tensorrt/build_jetson_ffmpeg.sh b/docker/tensorrt/build_jetson_ffmpeg.sh new file mode 100755 index 000000000..8c532ebc3 --- /dev/null +++ b/docker/tensorrt/build_jetson_ffmpeg.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# For jetson platforms, build ffmpeg with custom patches. 
NVIDIA supplies a deb +# with accelerated decoding, but it doesn't have accelerated scaling or encoding + +set -euxo pipefail + +INSTALL_PREFIX=/rootfs/usr/local + +apt-get -qq update +apt-get -qq install -y --no-install-recommends build-essential ccache clang cmake pkg-config +apt-get -qq install -y --no-install-recommends libx264-dev libx265-dev + +pushd /tmp + +# Install libnvmpi to enable nvmpi decoders (h264_nvmpi, hevc_nvmpi) +if [ -e /usr/local/cuda-10.2 ]; then + # assume Jetpack 4.X + wget -q https://developer.nvidia.com/embedded/L4T/r32_Release_v5.0/T186/Jetson_Multimedia_API_R32.5.0_aarch64.tbz2 -O jetson_multimedia_api.tbz2 +else + # assume Jetpack 5.X + wget -q https://developer.nvidia.com/downloads/embedded/l4t/r35_release_v3.1/release/jetson_multimedia_api_r35.3.1_aarch64.tbz2 -O jetson_multimedia_api.tbz2 +fi +tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2 + +wget -q https://github.com/madsciencetist/jetson-ffmpeg/archive/refs/heads/master.zip +unzip master.zip && rm master.zip && cd jetson-ffmpeg-master +LD_LIBRARY_PATH=$(pwd)/stubs:$LD_LIBRARY_PATH # tegra multimedia libs aren't available in image, so use stubs for ffmpeg build +mkdir build +cd build +cmake .. 
-DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$INSTALL_PREFIX +make -j$(nproc) +make install +cd ../../ + +# Install nv-codec-headers to enable ffnvcodec filters (scale_cuda) +wget -q https://github.com/FFmpeg/nv-codec-headers/archive/refs/heads/master.zip +unzip master.zip && rm master.zip && cd nv-codec-headers-master +make PREFIX=$INSTALL_PREFIX install +cd ../ && rm -rf nv-codec-headers-master + +# Build ffmpeg with nvmpi patch +wget -q https://ffmpeg.org/releases/ffmpeg-6.0.tar.xz +tar xaf ffmpeg-*.tar.xz && rm ffmpeg-*.tar.xz && cd ffmpeg-* +patch -p1 < ../jetson-ffmpeg-master/ffmpeg_patches/ffmpeg6.0_nvmpi.patch +export PKG_CONFIG_PATH=$INSTALL_PREFIX/lib/pkgconfig +# enable Jetson codecs but disable dGPU codecs +./configure --cc='ccache gcc' --cxx='ccache g++' \ + --enable-shared --disable-static --prefix=$INSTALL_PREFIX \ + --enable-gpl --enable-libx264 --enable-libx265 \ + --enable-nvmpi --enable-ffnvcodec --enable-cuda-llvm \ + --disable-cuvid --disable-nvenc --disable-nvdec \ + || { cat ffbuild/config.log && false; } +make -j$(nproc) +make install +cd ../ + +rm -rf /var/lib/apt/lists/* +popd diff --git a/docker/tensorrt/detector/build_python_tensorrt.sh b/docker/tensorrt/detector/build_python_tensorrt.sh new file mode 100755 index 000000000..21b6ae268 --- /dev/null +++ b/docker/tensorrt/detector/build_python_tensorrt.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -euxo pipefail + +mkdir -p /trt-wheels + +if [[ "${TARGETARCH}" == "arm64" ]]; then + + # NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9, + # so we must build python-tensorrt ourselves. 
+ + # Get python-tensorrt source + mkdir /workspace + cd /workspace + git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1 + + # Collect dependencies + EXT_PATH=/workspace/external && mkdir -p $EXT_PATH + pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11 + ln -s /usr/include/python3.9 $EXT_PATH/python3.9 + ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/ + + # Build wheel + cd /workspace/TensorRT/python + EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh + mv build/dist/*.whl /trt-wheels/ + +fi diff --git a/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf similarity index 100% rename from docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf rename to docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare similarity index 100% rename from docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare rename to docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base similarity index 100% rename from docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base rename to docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base diff --git 
a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run new file mode 100755 index 000000000..c39c7a0aa --- /dev/null +++ b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run @@ -0,0 +1,109 @@ +#!/command/with-contenv bash +# shellcheck shell=bash +# Generate models for the TensorRT detector + +# One or more comma-separated models may be specified via the YOLO_MODELS env. +# Append "-dla" to the model name to generate a DLA model with GPU fallback; +# otherwise a GPU-only model will be generated. + +set -o errexit -o nounset -o pipefail + +MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"} +TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)} +OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}" + +# Create output folder +mkdir -p ${OUTPUT_FOLDER} + +FIRST_MODEL=true +MODEL_DOWNLOAD="" +MODEL_CONVERT="" + +for model in ${YOLO_MODELS//,/ } +do + # Remove old link in case path/version changed + rm -f ${MODEL_CACHE_DIR}/${model}.trt + + if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then + if [[ ${FIRST_MODEL} = true ]]; then + MODEL_DOWNLOAD="${model%-dla}"; + MODEL_CONVERT="${model}" + FIRST_MODEL=false; + else + MODEL_DOWNLOAD+=",${model%-dla}"; + MODEL_CONVERT+=",${model}"; + fi + else + ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt + fi +done + +if [[ -z ${MODEL_CONVERT} ]]; then + echo "No models to convert." + exit 0 +fi + +# Setup ENV to select GPU for conversion +if [ ! -z ${TRT_MODEL_PREP_DEVICE+x} ]; then + if [ ! -z ${CUDA_VISIBLE_DEVICES+x} ]; then + PREVIOUS_CVD="$CUDA_VISIBLE_DEVICES" + unset CUDA_VISIBLE_DEVICES + fi + export CUDA_VISIBLE_DEVICES="$TRT_MODEL_PREP_DEVICE" +fi + +# On Jetpack 4.6, the nvidia container runtime will mount several host nvidia libraries into the +# container which should not be present in the image - if they are, TRT model generation will +# fail or produce invalid models. 
Thus we must request the user to install them on the host in +# order to run libyolo here. +# On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image. +if [[ "$(arch)" == "aarch64" ]]; then + if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then + echo "ERROR: Container must be launched with nvidia runtime" + exit 1 + elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 || + ! -e /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.8 || + ! -e /usr/lib/aarch64-linux-gnu/libnvparsers.so.8 || + ! -e /usr/lib/aarch64-linux-gnu/libnvonnxparser.so.8 ]]; then + echo "ERROR: Please run the following on the HOST:" + echo " sudo apt install libnvinfer8 libnvinfer-plugin8 libnvparsers8 libnvonnxparsers8 nvidia-container" + exit 1 + fi +fi + +echo "Generating the following TRT Models: ${MODEL_CONVERT}" + +# Build trt engine +cd /usr/local/src/tensorrt_demos/yolo + +echo "Downloading yolo weights" +./download_yolo.sh $MODEL_DOWNLOAD 2> /dev/null + +for model in ${MODEL_CONVERT//,/ } +do + python3 yolo_to_onnx.py -m ${model%-dla} > /dev/null + + echo -e "\nGenerating ${model}.trt. This may take a few minutes.\n"; start=$(date +%s) + if [[ $model == *-dla ]]; then + cmd="python3 onnx_to_tensorrt.py -m ${model%-dla} --dla_core 0" + else + cmd="python3 onnx_to_tensorrt.py -m ${model}" + fi + $cmd > /tmp/onnx_to_tensorrt.log || { cat /tmp/onnx_to_tensorrt.log && continue; } + + mv ${model%-dla}.trt ${OUTPUT_FOLDER}/${model}.trt; + ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt + echo "Generated ${model}.trt in $(($(date +%s)-start)) seconds" +done + +# Restore ENV after conversion +if [ ! -z ${TRT_MODEL_PREP_DEVICE+x} ]; then + unset CUDA_VISIBLE_DEVICES + if [ ! 
-z ${PREVIOUS_CVD+x} ]; then + export CUDA_VISIBLE_DEVICES="$PREVIOUS_CVD" + fi +fi + +# Print which models exist in output folder +echo "Available tensorrt models:" +cd ${OUTPUT_FOLDER} && ls *.trt; diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type similarity index 100% rename from docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type rename to docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up similarity index 100% rename from docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up rename to docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up diff --git a/docker/support/tensorrt_detector/tensorrt_libyolo.sh b/docker/tensorrt/detector/tensorrt_libyolo.sh similarity index 76% rename from docker/support/tensorrt_detector/tensorrt_libyolo.sh rename to docker/tensorrt/detector/tensorrt_libyolo.sh index e6fc415e5..91b9340a9 100755 --- a/docker/support/tensorrt_detector/tensorrt_libyolo.sh +++ b/docker/tensorrt/detector/tensorrt_libyolo.sh @@ -8,7 +8,10 @@ SCRIPT_DIR="/usr/local/src/tensorrt_demos" git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download # Build libyolo -cd ./tensorrt_demos/plugins && make all +if [ ! 
-e /usr/local/cuda ]; then + ln -s /usr/local/cuda-* /usr/local/cuda +fi +cd ./tensorrt_demos/plugins && make all -j$(nproc) cp libyolo_layer.so /usr/local/lib/libyolo_layer.so # Store yolo scripts for later conversion diff --git a/requirements-tensorrt.txt b/docker/tensorrt/requirements-amd64.txt similarity index 100% rename from requirements-tensorrt.txt rename to docker/tensorrt/requirements-amd64.txt diff --git a/docker/tensorrt/requirements-arm64.txt b/docker/tensorrt/requirements-arm64.txt new file mode 100644 index 000000000..9b12dac33 --- /dev/null +++ b/docker/tensorrt/requirements-arm64.txt @@ -0,0 +1 @@ +cuda-python == 11.7; platform_machine == 'aarch64' diff --git a/docker/tensorrt/requirements-models-arm64.txt b/docker/tensorrt/requirements-models-arm64.txt new file mode 100644 index 000000000..3490a7897 --- /dev/null +++ b/docker/tensorrt/requirements-models-arm64.txt @@ -0,0 +1,3 @@ +onnx == 1.14.0; platform_machine == 'aarch64' +protobuf == 3.20.3; platform_machine == 'aarch64' +numpy == 1.23.*; platform_machine == 'aarch64' # required by python-tensorrt 8.2.1 (Jetpack 4.6) diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl new file mode 100644 index 000000000..56e294100 --- /dev/null +++ b/docker/tensorrt/trt.hcl @@ -0,0 +1,94 @@ +variable "ARCH" { + default = "amd64" +} +variable "BASE_IMAGE" { + default = null +} +variable "SLIM_BASE" { + default = null +} +variable "TRT_BASE" { + default = null +} + +target "_build_args" { + args = { + BASE_IMAGE = BASE_IMAGE, + SLIM_BASE = SLIM_BASE, + TRT_BASE = TRT_BASE + } + platforms = ["linux/${ARCH}"] +} + +target wget { + dockerfile = "docker/main/Dockerfile" + target = "wget" + inherits = ["_build_args"] +} + +target deps { + dockerfile = "docker/main/Dockerfile" + target = "deps" + inherits = ["_build_args"] +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + target = "rootfs" + inherits = ["_build_args"] +} + +target wheels { + dockerfile = "docker/main/Dockerfile" + target = 
"wheels" + inherits = ["_build_args"] +} + +target devcontainer { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/amd64"] + target = "devcontainer" +} + +target "trt-deps" { + dockerfile = "docker/tensorrt/Dockerfile.base" + context = "." + contexts = { + deps = "target:deps", + } + inherits = ["_build_args"] +} + +target "tensorrt-base" { + dockerfile = "docker/tensorrt/Dockerfile.base" + context = "." + contexts = { + deps = "target:deps", + } + inherits = ["_build_args"] +} + +target "tensorrt" { + dockerfile = "docker/tensorrt/Dockerfile.${ARCH}" + context = "." + contexts = { + wget = "target:wget", + tensorrt-base = "target:tensorrt-base", + rootfs = "target:rootfs" + wheels = "target:wheels" + } + target = "frigate-tensorrt" + inherits = ["_build_args"] +} + +target "devcontainer-trt" { + dockerfile = "docker/tensorrt/Dockerfile.amd64" + context = "." + contexts = { + wheels = "target:wheels", + trt-deps = "target:trt-deps", + devcontainer = "target:devcontainer" + } + platforms = ["linux/amd64"] + target = "devcontainer-trt" +} diff --git a/docker/tensorrt/trt.mk b/docker/tensorrt/trt.mk new file mode 100644 index 000000000..0e01c1402 --- /dev/null +++ b/docker/tensorrt/trt.mk @@ -0,0 +1,26 @@ +BOARDS += trt + +JETPACK4_BASE ?= timongentzsch/l4t-ubuntu20-opencv:latest # L4T 32.7.1 JetPack 4.6.1 +JETPACK5_BASE ?= nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime # L4T 35.3.1 JetPack 5.1.1 +X86_DGPU_ARGS := ARCH=amd64 +JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE) +JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE) + +local-trt: version + $(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt + +local-trt-jp4: version + $(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt + 
+local-trt-jp5: version + $(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt + +build-trt: + $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt + $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt + $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt + +push-trt: build-trt + $(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt + $(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt + $(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index af2d7872f..50cd5ff79 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -120,7 +120,7 @@ NOTE: The folder that is mapped from the host needs to be the folder that contai ## Custom go2rtc version -Frigate currently includes go2rtc v1.5.0, there may be certain cases where you want to run a different version of go2rtc. +Frigate currently includes go2rtc v1.8.4, there may be certain cases where you want to run a different version of go2rtc. To do this: @@ -128,3 +128,34 @@ To do this: 2. Rename the build to `go2rtc`. 3. Give `go2rtc` execute permission. 4. Restart Frigate and the custom version will be used, you can verify by checking go2rtc logs. 
+ +## Validating your config.yaml file updates + +When frigate starts up, it checks whether your config file is valid, and if it is not, the process exits. To minimize interruptions when updating your config, you have three options -- you can edit the config via the WebUI which has built in validation, use the config API, or you can validate on the command line using the frigate docker container. + +### Via API + +Frigate can accept a new configuration file as JSON at the `/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid. + +```bash +curl -X POST http://frigate_host:5000/config/save -d @config.json +``` + +if you'd like you can use your yaml config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to json: + +```bash +yq r -j config.yml | curl -X POST http://frigate_host:5000/config/save -d @- +``` + +### Via Command Line + +You can also validate your config at the command line by using the docker container itself. In CI/CD, you leverage the return code to determine if your config is valid, Frigate will return `1` if the config is invalid, or `0` if it's valid. + +```bash +docker run \ + -v $(pwd)/config.yml:/config/config.yml \ + --entrypoint python3 \ + ghcr.io/blakeblackshear/frigate:stable \ + -u -m frigate \ + --validate_config +``` diff --git a/docs/docs/configuration/audio_detectors.md b/docs/docs/configuration/audio_detectors.md index ef1d8227c..b783daa69 100644 --- a/docs/docs/configuration/audio_detectors.md +++ b/docs/docs/configuration/audio_detectors.md @@ -48,15 +48,26 @@ cameras: - detect ``` +### Configuring Minimum Volume + +The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. 
Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. MQTT explorer can be used on the audio topic to see what volume level is being detected. + +:::tip + +Volume is considered motion for recordings, this means when the `record -> retain -> mode` is set to `motion` any time audio volume is > min_volume that recording segment for that camera will be kept. + +::: + ### Configuring Audio Events -The included audio model has over 500 different types of audio that can be detected, many of which are not practical. By default `bark`, `speech`, `yell`, and `scream` are enabled but these can be customized. +The included audio model has over [500 different types](https://github.com/blakeblackshear/frigate/blob/dev/audio-labelmap.txt) of audio that can be detected, many of which are not practical. By default `bark`, `fire_alarm`, `scream`, `speech`, and `yell` are enabled but these can be customized. ```yaml audio: enabled: True listen: - bark + - fire_alarm - scream - speech - yell diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md new file mode 100644 index 000000000..31048db2e --- /dev/null +++ b/docs/docs/configuration/autotracking.md @@ -0,0 +1,166 @@ +--- +id: autotracking +title: Camera Autotracking +--- + +An ONVIF-capable, PTZ (pan-tilt-zoom) camera that supports relative movement within the field of view (FOV) can be configured to automatically track moving objects and keep them in the center of the frame. + +![Autotracking example with zooming](/img/frigate-autotracking-example.gif) + +## Autotracking behavior + +Once Frigate determines that an object is not a false positive and has entered one of the required zones, the autotracker will move the PTZ camera to keep the object centered in the frame until the object either moves out of the frame, the PTZ is not capable of any more movement, or Frigate loses track of it. 
+ +Upon loss of tracking, Frigate will scan the region of the lost object for `timeout` seconds. If an object of the same type is found in that region, Frigate will autotrack that new object. + +When tracking has ended, Frigate will return to the camera firmware's PTZ preset specified by the `return_preset` configuration entry. + +## Checking ONVIF camera support + +Frigate autotracking functions with PTZ cameras capable of relative movement within the field of view (as specified in the [ONVIF spec](https://www.onvif.org/specs/srv/ptz/ONVIF-PTZ-Service-Spec-v1712.pdf) as `RelativePanTiltTranslationSpace` having a `TranslationSpaceFov` entry). + +Many cheaper or older PTZs may not support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported. + +Alternatively, you can download and run [this simple Python script](https://gist.github.com/hawkeye217/152a1d4ba80760dac95d46e143d37112), replacing the details on line 4 with your camera's IP address, ONVIF port, username, and password to check your camera. + +A growing list of cameras and brands that have been reported by users to work with Frigate's autotracking can be found [here](cameras.md). + +## Configuration + +First, set up a PTZ preset in your camera's firmware and give it a name. If you're unsure how to do this, consult the documentation for your camera manufacturer's firmware. Some tutorials for common brands: [Amcrest](https://www.youtube.com/watch?v=lJlE9-krmrM), [Reolink](https://www.youtube.com/watch?v=VAnxHUY5i5w), [Dahua](https://www.youtube.com/watch?v=7sNbc5U-k54). + +Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset. 
+ +An [ONVIF connection](cameras.md) is required for autotracking to function. Also, a [motion mask](masks.md) over your camera's timestamp and any overlay text is recommended to ensure they are completely excluded from scene change calculations when the camera is moving. + +Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT. + +```yaml +cameras: + ptzcamera: + ... + onvif: + # Required: host of the camera being connected to. + host: 0.0.0.0 + # Optional: ONVIF port for device (default: shown below). + port: 8000 + # Optional: username for login. + # NOTE: Some devices require admin to access ONVIF. + user: admin + # Optional: password for login. + password: admin + # Optional: PTZ camera object autotracking. Keeps a moving object in + # the center of the frame by automatically moving the PTZ camera. + autotracking: + # Optional: enable/disable object autotracking. (default: shown below) + enabled: False + # Optional: calibrate the camera on startup (default: shown below) + # A calibration will move the PTZ in increments and measure the time it takes to move. + # The results are used to help estimate the position of tracked objects after a camera move. + # Frigate will update your config file automatically after a calibration with + # a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False. + calibrate_on_startup: False + # Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below) + # Available options are: disabled, absolute, and relative + # disabled - don't zoom in/out on autotracked objects, use pan/tilt only + # absolute - use absolute zooming (supported by most PTZ capable cameras) + # relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements) + zooming: disabled + # Optional: A value to change the behavior of zooming on autotracked objects. 
(default: shown below) + # A lower value will keep more of the scene in view around a tracked object. + # A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly. + # The value should be between 0.1 and 0.75 + zoom_factor: 0.3 + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Required: Begin automatically tracking an object when it enters any of the listed zones. + required_zones: + - zone_name + # Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below) + return_preset: home + # Optional: Seconds to delay before returning to preset. (default: shown below) + timeout: 10 + # Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below) + movement_weights: [] +``` + +## Calibration + +PTZ motors operate at different speeds. Performing a calibration will direct Frigate to measure this speed over a variety of movements and use those measurements to better predict the amount of movement necessary to keep autotracked objects in the center of the frame. + +Calibration is optional, but will greatly assist Frigate in autotracking objects that move across the camera's field of view more quickly. + +To begin calibration, set the `calibrate_on_startup` for your camera to `True` and restart Frigate. Frigate will then make a series of small and large movements with your camera. Don't move the PTZ manually while calibration is in progress. Once complete, camera motion will stop and your config file will be automatically updated with a `movement_weights` parameter to be used in movement calculations. You should not modify this parameter manually. + +After calibration has ended, your PTZ will be moved to the preset specified by `return_preset`. + +:::note + +Frigate's web UI and all other cameras will be unresponsive while calibration is in progress. 
This is expected and normal to avoid excessive network traffic or CPU usage during calibration. Calibration for most PTZs will take about two minutes. The Frigate log will show calibration progress and any errors. + +::: + +At this point, Frigate will be running and will continue to refine and update the `movement_weights` parameter in your config automatically as the PTZ moves during autotracking and more measurements are obtained. + +Before restarting Frigate, you should set `calibrate_on_startup` in your config file to `False`, otherwise your refined `movement_weights` will be overwritten and calibration will occur when starting again. + +You can recalibrate at any time by removing the `movement_weights` parameter, setting `calibrate_on_startup` to `True`, and then restarting Frigate. You may need to recalibrate or remove `movement_weights` from your config altogether if autotracking is erratic. If you change your `return_preset` in any way or if you change your camera's detect `fps` value, a recalibration is also recommended. + +If you initially calibrate with zooming disabled and then enable zooming at a later point, you should also recalibrate. + +## Best practices and considerations + +Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR. + +The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases 5 fps is sufficient, but if you plan to track faster moving objects, you may want to increase this slightly. Higher frame rates (> 10fps) will only slow down Frigate and the motion estimator and may lead to dropped frames, especially if you are using experimental zooming. + +A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. 
You can watch Frigate's debug viewer for your camera to see a thicker colored box around the object currently being autotracked. + +![Autotracking Debug View](/img/autotracking-debug.gif) + +A full-frame zone in `required_zones` is not recommended, especially if you've calibrated your camera and there are `movement_weights` defined in the configuration file. Frigate will continue to autotrack an object that has entered one of the `required_zones`, even if it moves outside of that zone. + +Some users have found it helpful to adjust the zone `inertia` value. See the [configuration reference](index.md). + +## Zooming + +Zooming is a very experimental feature and may use significantly more CPU when tracking objects than panning/tilting only. + +Absolute zooming makes zoom movements separate from pan/tilt movements. Most PTZ cameras will support absolute zooming. Absolute zooming was developed to be very conservative to work best with a variety of cameras and scenes. Absolute zooming usually will not occur until an object has stopped moving or is moving very slowly. + +Relative zooming attempts to make a zoom movement concurrently with any pan/tilt movements. It was tested to work with some Dahua and Amcrest PTZs. But the ONVIF specification indicates that there is no assumption about how the generic zoom range is mapped to magnification, field of view or other physical zoom dimension when using relative zooming. So if relative zooming behavior is erratic or just doesn't work, try absolute zooming. + +You can optionally adjust the `zoom_factor` for your camera in your configuration file. Lower values will leave more space from the scene around the tracked object while higher values will cause your camera to zoom in more on the object. However, keep in mind that Frigate needs a fair amount of pixels and scene details outside of the bounding box of the tracked object to estimate the motion of your camera.
If the object is taking up too much of the frame, Frigate will not be able to track the motion of the camera and your object will be lost. + +The range of this option is from 0.1 to 0.75. The default value of 0.3 is conservative and should be sufficient for most users. Because every PTZ and scene is different, you should experiment to determine what works best for you. + +## Usage applications + +In security and surveillance, it's common to use "spotter" cameras in combination with your PTZ. When your fixed spotter camera detects an object, you could use an automation platform like Home Assistant to move the PTZ to a specific preset so that Frigate can begin automatically tracking the object. For example: a residence may have fixed cameras on the east and west side of the property, capturing views up and down a street. When the spotter camera on the west side detects a person, a Home Assistant automation could move the PTZ to a camera preset aimed toward the west. When the object enters the specified zone, Frigate's autotracker could then continue to track the person as it moves out of view of any of the fixed cameras. + +## Troubleshooting and FAQ + +### The autotracker loses track of my object. Why? + +There are many reasons this could be the case. If you are using experimental zooming, your `zoom_factor` value might be too high, the object might be traveling too quickly, the scene might be too dark, there are not enough details in the scene (for example, a PTZ looking down on a driveway or other monotone background without a sufficient number of hard edges or corners), or the scene is otherwise less than optimal for Frigate to maintain tracking. + +Your camera's shutter speed may also be set too low so that blurring occurs with motion. Check your camera's firmware to see if you can increase the shutter speed. + +Watching Frigate's debug view can help to determine a possible cause. The autotracked object will have a thicker colored box around it. 
+ +### I'm seeing an error in the logs that my camera "is still in ONVIF 'MOVING' status." What does this mean? + +There are two possible known reasons for this (and perhaps others yet unknown): a slow PTZ motor or buggy camera firmware. Frigate uses an ONVIF parameter provided by the camera, `MoveStatus`, to determine when the PTZ's motor is moving or idle. According to some users, Hikvision PTZs (even with the latest firmware) are not updating this value after PTZ movement. Unfortunately there is no workaround to this bug in Hikvision firmware, so autotracking will not function correctly and should be disabled in your config. This may also be the case with other non-Hikvision cameras utilizing Hikvision firmware. + +### I tried calibrating my camera, but the logs show that it is stuck at 0% and Frigate is not starting up. + +This is often caused by the same reason as above - the `MoveStatus` ONVIF parameter is not changing due to a bug in your camera's firmware. Also, see the note above: Frigate's web UI and all other cameras will be unresponsive while calibration is in progress. This is expected and normal. But if you don't see log entries every few seconds for calibration progress, your camera is not compatible with autotracking. + +### I'm seeing this error in the logs: "Autotracker: motion estimator couldn't get transformations". What does this mean? + +To maintain object tracking during PTZ moves, Frigate tracks the motion of your camera based on the details of the frame. If you are seeing this message, it could mean that your `zoom_factor` may be set too high, the scene around your detected object does not have enough details (like hard edges or color variations), or your camera's shutter speed is too slow and motion blur is occurring. Try reducing `zoom_factor`, finding a way to alter the scene around your object, or changing your camera's shutter speed. + +### Calibration seems to have completed, but the camera is not actually moving to track my object.
Why? + +Some cameras have firmware that reports that FOV RelativeMove, the ONVIF command that Frigate uses for autotracking, is supported. However, if the camera does not pan or tilt when an object comes into the required zone, your camera's firmware does not actually support FOV RelativeMove. One such camera is the Uniview IPC672LR-AX4DUPK. It actually moves its zoom motor instead of panning and tilting and does not follow the ONVIF standard whatsoever. diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index eb704358f..89dd440a2 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -80,8 +80,8 @@ cameras: rtmp: enabled: False # <-- RTMP should be disabled if your stream is not H264 detect: - width: # <---- update for your camera's resolution - height: # <---- update for your camera's resolution + width: # <- optional, by default Frigate tries to automatically detect resolution + height: # <- optional, by default Frigate tries to automatically detect resolution ``` ### Blue Iris RTSP Cameras @@ -108,21 +108,20 @@ According to [this discussion](https://github.com/blakeblackshear/frigate/issues ```yaml go2rtc: streams: - reolink: - - http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password - - "ffmpeg:reolink#audio=opus" - reolink_sub: - - http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password + your_reolink_camera: + - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus" + your_reolink_camera_sub: + - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password" cameras: - reolink: + your_reolink_camera: ffmpeg: inputs: - - path: rtsp://127.0.0.1:8554/reolink?video=copy&audio=aac + - path: rtsp://127.0.0.1:8554/your_reolink_camera input_args: 
preset-rtsp-restream roles: - record - - path: rtsp://127.0.0.1:8554/reolink_sub?video=copy + - path: rtsp://127.0.0.1:8554/your_reolink_camera_sub input_args: preset-rtsp-restream roles: - detect @@ -141,7 +140,7 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-rtsp) In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect. @@ -151,3 +150,7 @@ ffmpeg: record: preset-record-ubiquiti rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead ``` + +### TP-Link VIGI Cameras + +TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded events. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`. diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index 8f907cb3f..a95ffae86 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -1,6 +1,6 @@ --- id: cameras -title: Cameras +title: Camera Configuration --- ## Setting Up Camera Inputs @@ -11,11 +11,12 @@ A camera is enabled by default but can be temporarily disabled by using `enabled Each role can only be assigned to one input per camera. 
The options for roles are as follows: -| Role | Description | -| ---------- | ---------------------------------------------------------------------------------------- | -| `detect` | Main feed for object detection | -| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | -| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) | +| Role | Description | +| -------- | ---------------------------------------------------------------------------------------- | +| `detect` | Main feed for object detection. [docs](object_detectors.md) | +| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | +| `audio` | Feed for audio based detection. [docs](audio_detectors.md) | +| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) | ```yaml mqtt: @@ -33,8 +34,8 @@ cameras: roles: - record detect: - width: 1280 - height: 720 + width: 1280 # <- optional, by default Frigate tries to automatically detect resolution + height: 720 # <- optional, by default Frigate tries to automatically detect resolution ``` Additional cameras are simply added to the config under the `cameras` entry. @@ -51,13 +52,18 @@ For camera model specific settings check the [camera specific](camera_specific.m ## Setting up camera PTZ controls -Add onvif config to camera +:::caution + +Not every PTZ supports ONVIF, which is the standard protocol Frigate uses to communicate with your camera. Check the [official list of ONVIF conformant products](https://www.onvif.org/conformant-products/), your camera documentation, or camera manufacturer's website to ensure your PTZ supports ONVIF. Also, ensure your camera is running the latest firmware. + +::: + +Add the onvif section to your camera in your configuration file: ```yaml cameras: back: - ffmpeg: - ... + ffmpeg: ... 
onvif: host: 10.0.10.10 port: 8000 @@ -65,4 +71,28 @@ cameras: password: password ``` -then PTZ controls will be available in the cameras WebUI. +If the ONVIF connection is successful, PTZ controls will be available in the camera's WebUI. + +An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs. + +## ONVIF PTZ camera recommendations + +This list of working and non-working PTZ cameras is based on user feedback. + +| Brand or specific camera | PTZ Controls | Autotracking | Notes | +| ------------------------ | :----------: | :----------: | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking | +| Amcrest ASH21 | ❌ | ❌ | No ONVIF support | +| Ctronics PTZ | ✅ | ❌ | | +| Dahua | ✅ | ✅ | | +| Foscam R5 | ✅ | ❌ | | +| Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others | +| Reolink 511WA | ✅ | ❌ | Zoom only | +| Reolink E1 Pro | ✅ | ❌ | | +| Reolink E1 Zoom | ✅ | ❌ | | +| Reolink RLC-823A 16x | ✅ | ❌ | | +| Sunba 405-D20X | ✅ | ❌ | | +| Tapo C200 | ✅ | ❌ | Incomplete ONVIF support | +| Tapo C210 | ❌ | ❌ | Incomplete ONVIF support | +| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands | +| Vikylin PTZ-2804X-I2 | ❌ | ❌ | Incomplete ONVIF support | diff --git a/docs/docs/configuration/ffmpeg_presets.md b/docs/docs/configuration/ffmpeg_presets.md index 66747350e..4715fae95 100644 --- a/docs/docs/configuration/ffmpeg_presets.md +++ 
b/docs/docs/configuration/ffmpeg_presets.md @@ -11,16 +11,20 @@ It is highly recommended to use hwaccel presets in the config. These presets not See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on how to setup hwaccel for your GPU / iGPU. -| Preset | Usage | Other Notes | -| --------------------- | ---------------------------- | ----------------------------------------------------- | -| preset-rpi-32-h264 | 32 bit Rpi with h264 stream | | -| preset-rpi-64-h264 | 64 bit Rpi with h264 stream | | -| preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen | -| preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead | -| preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead | -| preset-nvidia-h264 | Nvidia GPU with h264 stream | | -| preset-nvidia-h265 | Nvidia GPU with h265 stream | | -| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 | +| Preset | Usage | Other Notes | +| --------------------- | ------------------------------ | ----------------------------------------------------- | +| preset-rpi-64-h264 | 64 bit Rpi with h264 stream | | +| preset-rpi-64-h265 | 64 bit Rpi with h265 stream | | +| preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen | +| preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead | +| preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead | +| preset-nvidia-h264 | Nvidia GPU with h264 stream | | +| preset-nvidia-h265 | Nvidia GPU with h265 stream | | +| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 | +| preset-jetson-h264 | Nvidia Jetson with h264 stream | | +| preset-jetson-h265 | Nvidia Jetson with h265 stream | | +| preset-rk-h264 | 
Rockchip MPP with h264 stream | Use image with *-rk suffix and privileged mode | +| preset-rk-h265 | Rockchip MPP with h265 stream | Use image with *-rk suffix and privileged mode | ### Input Args Presets diff --git a/docs/docs/configuration/hardware_acceleration.md b/docs/docs/configuration/hardware_acceleration.md index 90ede4e54..7f5e69108 100644 --- a/docs/docs/configuration/hardware_acceleration.md +++ b/docs/docs/configuration/hardware_acceleration.md @@ -3,16 +3,25 @@ id: hardware_acceleration title: Hardware Acceleration --- +# Hardware Acceleration + It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro -### Raspberry Pi 3/4 +# Officially Supported + +## Raspberry Pi 3/4 Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory). **NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. ```yaml +# if you want to decode a h264 stream ffmpeg: hwaccel_args: preset-rpi-64-h264 + +# if you want to decode a h265 (hevc) stream +ffmpeg: + hwaccel_args: preset-rpi-64-h265 ``` :::note @@ -21,17 +30,17 @@ If running Frigate in docker, you either need to run in priviliged mode or be su ```yaml docker run -d \ - --name frigate \ - ... - --device /dev/video10 \ - ghcr.io/blakeblackshear/frigate:stable +--name frigate \ +... +--device /dev/video10 \ +ghcr.io/blakeblackshear/frigate:stable ``` ::: -### Intel-based CPUs +## Intel-based CPUs -#### Via VAAPI +### Via VAAPI VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs if QSV does not work. 
@@ -42,37 +51,36 @@ ffmpeg: **NOTICE**: With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). -#### Via Quicksync (>=10th Generation only) +### Via Quicksync (>=10th Generation only) QSV must be set specifically based on the video encoding of the stream. -##### H.264 streams +#### H.264 streams ```yaml ffmpeg: hwaccel_args: preset-intel-qsv-h264 ``` -##### H.265 streams +#### H.265 streams ```yaml ffmpeg: hwaccel_args: preset-intel-qsv-h265 ``` -#### Configuring Intel GPU Stats in Docker +### Configuring Intel GPU Stats in Docker -Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. Three possible changes can be made: +Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. There are two options: 1. Run the container as privileged. -2. Adding the `CAP_PERFMON` capability. -3. Setting the `perf_event_paranoid` low enough to allow access to the performance event system. +2. Add the `CAP_PERFMON` capability (note: you might need to set the `perf_event_paranoid` low enough to allow access to the performance event system.) -##### Run as privileged +#### Run as privileged This method works, but it gives more permissions to the container than are actually needed. 
-###### Docker Compose - Privileged +##### Docker Compose - Privileged ```yaml services: @@ -82,7 +90,7 @@ services: privileged: true ``` -###### Docker Run CLI - Privileged +##### Docker Run CLI - Privileged ```bash docker run -d \ @@ -92,11 +100,11 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -##### CAP_PERFMON +#### CAP_PERFMON Only recent versions of Docker support the `CAP_PERFMON` capability. You can test to see if yours supports it by running: `docker run --cap-add=CAP_PERFMON hello-world` -###### Docker Compose - CAP_PERFMON +##### Docker Compose - CAP_PERFMON ```yaml services: @@ -107,7 +115,7 @@ services: - CAP_PERFMON ``` -###### Docker Run CLI - CAP_PERFMON +##### Docker Run CLI - CAP_PERFMON ```bash docker run -d \ @@ -117,15 +125,15 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -##### perf_event_paranoid +#### perf_event_paranoid _Note: This setting must be changed for the entire system._ For more information on the various values across different distributions, see https://askubuntu.com/questions/1400874/what-does-perf-paranoia-level-four-do. -Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=1 >> /etc/sysctl.d/local.conf'` +Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. 
Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'` -### AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver +## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. @@ -136,7 +144,7 @@ ffmpeg: hwaccel_args: preset-vaapi ``` -### NVIDIA GPUs +## NVIDIA GPUs While older GPUs may work, it is recommended to use modern, supported GPUs. NVIDIA provides a [matrix of supported GPUs and features](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new). If your card is on the list and supports CUVID/NVDEC, it will most likely work with Frigate for decoding. However, you must also use [a driver version that will work with FFmpeg](https://github.com/FFmpeg/nv-codec-headers/blob/master/README). Older driver versions may be missing symbols and fail to work, and older cards are not supported by newer driver versions. The only way around this is to [provide your own FFmpeg](/configuration/advanced#custom-ffmpeg-build) that will work with your driver version, but this is unsupported and may not work well if at all. @@ -144,11 +152,11 @@ A more complete list of cards and their compatible drivers is available in the [ If your distribution does not offer NVIDIA driver packages, you can [download them here](https://www.nvidia.com/en-us/drivers/unix/). -#### Configuring Nvidia GPUs in Docker +### Configuring Nvidia GPUs in Docker Additional configuration is needed for the Docker container to be able to access the NVIDIA GPU. The supported method for this is to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker) and specify the GPU to Docker. 
How you do this depends on how Docker is being run: -##### Docker Compose - Nvidia GPU +#### Docker Compose - Nvidia GPU ```yaml services: @@ -165,7 +173,7 @@ services: capabilities: [gpu] ``` -##### Docker Run CLI - Nvidia GPU +#### Docker Run CLI - Nvidia GPU ```bash docker run -d \ @@ -175,7 +183,7 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -#### Setup Decoder +### Setup Decoder The decoder you need to pass in the `hwaccel_args` will depend on the input video. @@ -242,3 +250,133 @@ processes: If you do not see these processes, check the `docker logs` for the container and look for decoding errors. These instructions were originally based on the [Jellyfin documentation](https://jellyfin.org/docs/general/administration/hardware-acceleration.html#nvidia-hardware-acceleration-on-docker-linux). + +# Community Supported + +## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano) + +A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build +with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the
+`frigate-tensorrt-jp4` image, or if your Jetson host is running Jetpack 5.0+, use the `frigate-tensorrt-jp5` +image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, +but the image will still allow hardware decoding and tensorrt object detection. + +You will need to use the image with the nvidia container runtime: + +### Docker Run CLI - Jetson + +```bash +docker run -d \ + ... + --runtime nvidia \ + ghcr.io/blakeblackshear/frigate-tensorrt-jp5 +``` + +### Docker Compose - Jetson + +```yaml +version: '2.4' +services: + frigate: + ... + image: ghcr.io/blakeblackshear/frigate-tensorrt-jp5 + runtime: nvidia # Add this +``` + +:::note + +The `runtime:` tag is not supported on older versions of docker-compose.
If you run into this, you can instead use the nvidia runtime system-wide by adding `"default-runtime": "nvidia"` to `/etc/docker/daemon.json`: + +``` +{ + "runtimes": { + "nvidia": { + "path": "nvidia-container-runtime", + "runtimeArgs": [] + } + }, + "default-runtime": "nvidia" +} +``` + +::: + +### Setup Decoder + +The decoder you need to pass in the `hwaccel_args` will depend on the input video. + +A list of supported codecs (you can use `ffmpeg -decoders | grep nvmpi` in the container to get the ones your card supports) + +``` + V..... h264_nvmpi h264 (nvmpi) (codec h264) + V..... hevc_nvmpi hevc (nvmpi) (codec hevc) + V..... mpeg2_nvmpi mpeg2 (nvmpi) (codec mpeg2video) + V..... mpeg4_nvmpi mpeg4 (nvmpi) (codec mpeg4) + V..... vp8_nvmpi vp8 (nvmpi) (codec vp8) + V..... vp9_nvmpi vp9 (nvmpi) (codec vp9) +``` + +For example, for H264 video, you'll select `preset-jetson-h264`. + +```yaml +ffmpeg: + hwaccel_args: preset-jetson-h264 +``` + +If everything is working correctly, you should see a significant reduction in ffmpeg CPU load and power consumption. +Verify that hardware decoding is working by running `jtop` (`sudo pip3 install -U jetson-stats`), which should show +that NVDEC/NVDEC1 are in use. + +## Rockchip platform + +Hardware accelerated video de-/encoding is supported on all Rockchip SoCs. + +### Setup + +Use a frigate docker image with `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file. + +### Configuration + +Add one of the following ffmpeg presets to your `config.yaml` to enable hardware acceleration: + +```yaml +# if you try to decode a h264 encoded stream +ffmpeg: + hwaccel_args: preset-rk-h264 + +# if you try to decode a h265 (hevc) encoded stream +ffmpeg: + hwaccel_args: preset-rk-h265 +``` + +:::note + +Make sure that your SoC supports hardware acceleration for your input stream. 
For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to de- and encode h265 with a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet. + +::: + +### go2rtc presets for hardware accelerated transcoding + +If your input stream is to be transcoded using hardware acceleration, there are these presets for go2rtc: `h264/rk` and `h265/rk`. You can use them this way: + +``` +go2rtc: + streams: + Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264/rk + Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265/rk +``` + +:::warning + +The go2rtc docs may suggest the following configuration: + +``` +go2rtc: + streams: + Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264#hardware=rk + Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265#hardware=rk +``` + +However, this does not currently work. + +::: diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index f23a32270..c022d57c2 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -1,6 +1,6 @@ --- id: index -title: Configuration File +title: Frigate Configuration Reference --- For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored. @@ -19,31 +19,15 @@ cameras: - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 roles: - detect - detect: - width: 1280 - height: 720 ``` ### VSCode Configuration Schema VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. 
This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon. -### Full configuration reference: +### Environment Variable Substitution -:::caution - -It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. - -::: - -**Note:** The following values will be replaced at runtime by using environment variables - -- `{FRIGATE_MQTT_USER}` -- `{FRIGATE_MQTT_PASSWORD}` -- `{FRIGATE_RTSP_USER}` -- `{FRIGATE_RTSP_PASSWORD}` - -for example: +Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the configuration reference below. For example, the following values can be replaced at runtime by using environment variables: ```yaml mqtt: @@ -63,6 +47,14 @@ onvif: password: "{FRIGATE_RTSP_PASSWORD}" ``` +### Full configuration reference: + +:::caution + +It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. + +::: + ```yaml mqtt: # Optional: Enable mqtt server (default: shown below) @@ -78,11 +70,11 @@ mqtt: # NOTE: must be unique if you are running multiple instances client_id: frigate # Optional: user - # NOTE: MQTT user can be specified with an environment variables that must begin with 'FRIGATE_'. + # NOTE: MQTT user can be specified with an environment variables or docker secrets that must begin with 'FRIGATE_'. # e.g. user: '{FRIGATE_MQTT_USER}' user: mqtt_user # Optional: password - # NOTE: MQTT password can be specified with an environment variables that must begin with 'FRIGATE_'. 
+ # NOTE: MQTT password can be specified with an environment variable or docker secret that must begin with 'FRIGATE_'.
+ speech: + # Minimum score that triggers an audio event (default: shown below) + threshold: 0.8 # Optional: logger verbosity settings logger: @@ -212,15 +217,17 @@ ffmpeg: # Optional: Detect configuration # NOTE: Can be overridden at the camera level detect: - # Optional: width of the frame for the input with the detect role (default: shown below) + # Optional: width of the frame for the input with the detect role (default: use native stream resolution) width: 1280 - # Optional: height of the frame for the input with the detect role (default: shown below) + # Optional: height of the frame for the input with the detect role (default: use native stream resolution) height: 720 # Optional: desired fps for your camera for the input with the detect role (default: shown below) # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. fps: 5 # Optional: enables detection for the camera (default: True) enabled: True + # Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate) + min_initialized: 2 # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate) max_disappeared: 25 # Optional: Configuration for stationary object tracking @@ -314,7 +321,7 @@ motion: # Low values will cause things like moving shadows to be detected as motion for longer. # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ frame_alpha: 0.01 - # Optional: Height of the resized motion frame (default: 50) + # Optional: Height of the resized motion frame (default: 100) # Higher values will result in more granular motion detection at the expense of higher CPU usage. # Lower values result in less CPU, but small changes may not register as motion. 
frame_height: 100 @@ -338,6 +345,8 @@ record: # Optional: Number of minutes to wait between cleanup runs (default: shown below) # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o expire_interval: 60 + # Optional: Sync recordings with disk on startup and once a day (default: shown below). + sync_recordings: False # Optional: Retention settings for recording retain: # Optional: Number of days to retain recordings regardless of events (default: shown below) @@ -350,6 +359,16 @@ record: # active_objects - save all recording segments with active/moving objects # NOTE: this mode only applies when the days setting above is greater than 0 mode: all + # Optional: Recording Export Settings + export: + # Optional: Timelapse Output Args (default: shown below). + # NOTE: The default args are set to fit 24 hours of recording into 1 hour playback. + # See https://stackoverflow.com/a/58268695 for more info on how these args work. + # As an example: if you wanted to go from 24 hours to 30 minutes that would be going + # from 86400 seconds to 1800 seconds which would be 1800 / 86400 = 0.02. + # The -r (framerate) dictates how smooth the output video is. + # So the args would be -vf setpts=0.02*PTS -r 30 in that case. 
+ timelapse_args: "-vf setpts=0.04*PTS -r 30" # Optional: Event recording settings events: # Optional: Number of seconds before the event to include (default: shown below) @@ -403,6 +422,8 @@ snapshots: # Optional: Per object retention days objects: person: 15 + # Optional: quality of the encoded jpeg, 0-100 (default: shown below) + quality: 70 # Optional: RTMP configuration # NOTE: RTMP is deprecated in favor of restream @@ -412,7 +433,7 @@ rtmp: enabled: False # Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.5.0) +# Uses https://github.com/AlexxIT/go2rtc (v1.8.3) go2rtc: # Optional: jsmpeg stream configuration for WebUI @@ -465,7 +486,7 @@ cameras: # Required: A list of input streams for the camera. See documentation for more information. inputs: # Required: the path to the stream - # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {} + # NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {} - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 # Required: list of roles for this stream. valid values are: audio,detect,record,rtmp # NOTICE: In addition to assigning the audio, record, and rtmp roles, @@ -494,6 +515,9 @@ cameras: # to be replaced by a newer image. (default: shown below) best_image_timeout: 60 + # Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera. + webui_url: "" + # Optional: zones for this camera zones: # Required: name of the zone @@ -503,7 +527,7 @@ cameras: # Required: List of x,y coordinates to define the polygon of the zone. # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box. coordinates: 545,1077,747,939,788,805 - # Optional: Number of consecutive frames required for object to be considered present in the zone. 
Allowed values are 1-10 (default: shown below) + # Optional: Number of consecutive frames required for object to be considered present in the zone (default: shown below). inertia: 3 # Optional: List of objects that can trigger this zone (default: all tracked objects) objects: @@ -555,6 +579,40 @@ cameras: user: admin # Optional: password for login. password: admin + # Optional: PTZ camera object autotracking. Keeps a moving object in + # the center of the frame by automatically moving the PTZ camera. + autotracking: + # Optional: enable/disable object autotracking. (default: shown below) + enabled: False + # Optional: calibrate the camera on startup (default: shown below) + # A calibration will move the PTZ in increments and measure the time it takes to move. + # The results are used to help estimate the position of tracked objects after a camera move. + # Frigate will update your config file automatically after a calibration with + # a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False. + calibrate_on_startup: False + # Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below) + # Available options are: disabled, absolute, and relative + # disabled - don't zoom in/out on autotracked objects, use pan/tilt only + # absolute - use absolute zooming (supported by most PTZ capable cameras) + # relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements) + zooming: disabled + # Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below) + # A lower value will keep more of the scene in view around a tracked object. + # A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly. 
+ # The value should be between 0.1 and 0.75 + zoom_factor: 0.3 + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Required: Begin automatically tracking an object when it enters any of the listed zones. + required_zones: + - zone_name + # Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below) + return_preset: home + # Optional: Seconds to delay before returning to preset. (default: shown below) + timeout: 10 + # Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below) + movement_weights: [] # Optional: Configuration for how to sort the cameras in the Birdseye view. birdseye: @@ -596,7 +654,7 @@ ui: # Optional: Telemetry configuration telemetry: - # Optional: Enabled network interfaces for bandwidth stats monitoring (default: shown below) + # Optional: Enabled network interfaces for bandwidth stats monitoring (default: empty list, let nethogs search all) network_interfaces: - eth - enp @@ -611,6 +669,7 @@ telemetry: # Enable Intel GPU stats (default: shown below) intel_gpu_stats: True # Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below) + # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled. network_bandwidth: False # Optional: Enable the latest version outbound check (default: shown below) # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index 697b11347..003e7599c 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -9,11 +9,11 @@ Frigate has different live view options, some of which require the bundled `go2r Live view options can be selected while viewing the live stream. 
The options are: -| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations | -| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | -------------------------------------------- | -| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none | -| mse | low | native | native | yes (depends on audio codec) | yes | not supported on iOS, Firefox is h.264 only | -| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 | +| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations | +| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------ | +| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none | +| mse | low | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only | +| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 | ### Audio Support @@ -37,12 +37,12 @@ There may be some cameras that you would prefer to use the sub stream for live v ```yaml go2rtc: streams: - rtsp_cam: + test_cam: - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio. - - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus - rtsp_cam_sub: + - "ffmpeg:test_cam#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc + test_cam_sub: - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio. 
- - "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus + - "ffmpeg:test_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc cameras: test_cam: @@ -59,7 +59,7 @@ cameras: roles: - detect live: - stream_name: rtsp_cam_sub + stream_name: test_cam_sub ``` ### WebRTC extra configuration: @@ -78,7 +78,7 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req - 192.168.1.10:8555 - stun:8555 ``` - + - For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.0.0.0/8` CIDR block. :::tip @@ -104,6 +104,7 @@ If you are having difficulties getting WebRTC to work and you are running Frigat If not running in host mode, port 8555 will need to be mapped for the container: docker-compose.yml + ```yaml services: frigate: @@ -115,4 +116,4 @@ services: ::: -See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#module-webrtc) for more information about this. +See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this. diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md new file mode 100644 index 000000000..f3d1d7692 --- /dev/null +++ b/docs/docs/configuration/motion_detection.md @@ -0,0 +1,103 @@ +--- +id: motion_detection +title: Motion Detection +--- + +# Tuning Motion Detection + +Frigate uses motion detection as a first line check to see if there is anything happening in the frame worth checking with object detection. + +Once motion is detected, it tries to group up nearby areas of motion together in hopes of identifying a rectangle in the image that will capture the area worth inspecting. These are the red "motion boxes" you see in the debug viewer. 
+ +## The Goal + +The default motion settings should work well for the majority of cameras, however there are cases where tuning motion detection can lead to better and more optimal results. Each camera has its own environment with different variables that affect motion, this means that the same motion settings will not fit all of your cameras. + +Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss events. + +## Create Motion Masks + +First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md). + +## Prepare For Testing + +The easiest way to tune motion detection is to do it live, have one window / screen open with the frigate debug view and motion boxes enabled with another window / screen open allowing for configuring the motion settings. It is recommended to use Home Assistant or MQTT as they offer live configuration of some motion settings meaning that Frigate does not need to be restarted when values are changed. 
+ +In Home Assistant the `Improve Contrast`, `Contour Area`, and `Threshold` configuration entities are disabled by default but can easily be enabled and used to tune live, otherwise MQTT can be used. + +## Tuning Motion Detection During The Day + +Now that things are set up, find a time to tune that represents normal circumstances. For example, if you tune your motion on a day that is sunny and windy you may find later that the motion settings are not sensitive enough on a cloudy and still day. + +:::note + +Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss events from objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected. + +::: + +### Threshold + +The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. + +```yaml +# default threshold value +motion: + # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) + # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. + # The value should be between 1 and 255. + threshold: 30 +``` + +Lower values mean motion detection is more sensitive to changes in color, making it more likely for example to detect motion when a brown dogs blends in with a brown fence or a person wearing a red shirt blends in with a red car. If the threshold is too low however, it may detect things like grass blowing in the wind, shadows, etc. to be detected as motion. + +Watching the motion boxes in the debug view, increase the threshold until you only see motion that is visible to the eye. 
Once this is done, it is important to test and ensure that desired motion is still detected. + +### Contour Area + +```yaml +# default contour_area value +motion: + # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) + # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will + # make motion detection more sensitive to smaller moving objects. + # As a rule of thumb: + # - 10 - high sensitivity + # - 30 - medium sensitivity + # - 50 - low sensitivity + contour_area: 10 +``` + +Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera. + +Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving. + +### Improve Contrast + +At this point if motion is working as desired there is no reason to continue with tuning for the day. If you were unable to find a balance between desired and undesired motion being detected, you can try disabling improve contrast and going back to the threshold and contour area steps. + +## Tuning Motion Detection During The Night + +Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. 
+However, if the preferred day settings do not work well at night, it is recommended to use Home Assistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index d684a2917..8de8db192 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -3,7 +3,9 @@ id: object_detectors title: Object Detectors --- -Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. +# Officially Supported Detectors + +Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, and `rknn`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. ## CPU Detector (not recommended) @@ -42,8 +44,6 @@ detectors: coral: type: edgetpu device: usb - model: - path: "/custom_model.tflite" ``` ### Multiple USB Corals @@ -174,7 +174,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t ### Minimum Hardware Support -The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below. +The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=530`. Also the GPU must support a Compute Capability of `5.0` or greater. 
This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below. To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU. @@ -194,7 +194,9 @@ The model used for TensorRT must be preprocessed on the same hardware platform t The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host. -To by default, the `yolov7-tiny-416` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder. +By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder. 
+ +If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU. If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it. @@ -233,10 +235,18 @@ An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yol ```yml frigate: environment: - - YOLO_MODELS="yolov4-608,yolov7x-640" + - YOLO_MODELS=yolov4-608,yolov7x-640 - USE_FP16=false ``` +If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU, however in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU. + +```yml +frigate: + environment: + - TRT_MODEL_PREP_DEVICE=0 # Optionally, select which GPU is used for model optimization +``` + ### Configuration Parameters The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container. 
@@ -250,11 +260,11 @@ detectors: device: 0 #This is the default, select the first GPU model: - path: /config/model_cache/tensorrt/yolov7-tiny-416.trt + path: /config/model_cache/tensorrt/yolov7-320.trt input_tensor: nchw input_pixel_format: rgb - width: 416 - height: 416 + width: 320 + height: 320 ``` ## Deepstack / CodeProject.AI Server Detector @@ -277,4 +287,105 @@ detectors: Replace `` and `` with the IP address and port of your CodeProject.AI server. -To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly. \ No newline at end of file +To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly. + + +# Community Supported Detectors + +## Rockchip RKNN-Toolkit-Lite2 + +This detector is only available if one of the following Rockchip SoCs is used: +- RK3588/RK3588S +- RK3568 +- RK3566 +- RK3562 + +These SoCs come with a NPU that will highly speed up detection. + +### Setup + +Use a frigate docker image with `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file. + +### Configuration + +This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for one). Lines that are required at least to use the detector are labeled as required, all other lines are optional. 
+ +```yaml +detectors: # required + rknn: # required + type: rknn # required + # core mask for npu + core_mask: 0 + +model: # required + # name of yolov8 model or path to your own .rknn model file + # possible values are: + # - default-yolov8n + # - default-yolov8s + # - default-yolov8m + # - default-yolov8l + # - default-yolov8x + # - /config/model_cache/rknn/your_custom_model.rknn + path: default-yolov8n + # width and height of detection frames + width: 320 + height: 320 + # pixel format of detection frame + # default value is rgb but yolov models usually use bgr format + input_pixel_format: bgr # required + # shape of detection frame + input_tensor: nhwc +``` + +Explanation for rknn specific options: +- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this in only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit coresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples: + - `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value. + - `core_mask: 0b001` use only core0. + - `core_mask: 0b011` use core0 and core1. + - `core_mask: 0b110` use core1 and core2. **This does not** work, since core0 is disabled. + +### Choosing a model + +There are 5 default yolov8 models that differ in size and therefore load the NPU more or less. 
In ascending order, with the top one being the smallest and least computationally intensive model: + +| Model | Size in mb | +| ------- | ---------- | +| yolov8n | 9 | +| yolov8s | 25 | +| yolov8m | 54 | +| yolov8l | 90 | +| yolov8x | 136 | + +:::tip + +You can get the load of your NPU with the following command: + +```bash +$ cat /sys/kernel/debug/rknpu/load +>> NPU load: Core0: 0%, Core1: 0%, Core2: 0%, +``` + +::: + +- By default the rknn detector uses the yolov8n model (`model: path: default-yolov8n`). This model comes with the image, so no further steps than those mentioned above are necessary. +- If you want to use a more precise model, you can pass `default-yolov8s`, `default-yolov8m`, `default-yolov8l` or `default-yolov8x` as `model: path:` option. + - If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`. + - If your server has no internet connection, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases) using another device and place it in the `config/model_cache/rknn` on your system. +- Finally, you can also provide your own model. Note that only yolov8 models are currently supported. Moreover, you will need to convert your model to the rknn format using `rknn-toolkit2` on a x86 machine. Afterwards, you can place your `.rknn` model file in the `config/model_cache/rknn` directory on your system. Then you need to pass the path to your model using the `path` option of your `model` block like this: +```yaml +model: + path: /config/model_cache/rknn/my-rknn-model.rknn +``` + +:::tip + +When you have a multicore NPU, you can enable all cores to reduce inference times. You should consider activating all cores if you use a larger model like yolov8l. 
If your NPU has 3 cores (like RK3588/S SoCs)
-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.5.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.8.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#configuration) for more advanced configurations and features. :::note @@ -17,7 +17,12 @@ You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which ca ### Birdseye Restream -Birdseye RTSP restream can be enabled at `birdseye -> restream` and accessed at `rtsp://:8554/birdseye`. Enabling the restream will cause birdseye to run 24/7 which may increase CPU usage somewhat. +Birdseye RTSP restream can be accessed at `rtsp://:8554/birdseye`. Enabling the birdseye restream will cause birdseye to run 24/7 which may increase CPU usage somewhat. + +```yaml +birdseye: + restream: true +``` ### Securing Restream With Authentication @@ -28,8 +33,7 @@ go2rtc: rtsp: username: "admin" password: "pass" - streams: - ... + streams: ... ``` **NOTE:** This does not apply to localhost requests, there is no need to provide credentials when using the restream as a source for frigate cameras. @@ -49,31 +53,31 @@ One connection is made to the camera. 
One for the restream, `detect` and `record ```yaml go2rtc: streams: - rtsp_cam: # <- for RTSP streams + name_your_rtsp_cam: # <- for RTSP streams - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio - - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) - http_cam: # <- for other streams + - "ffmpeg:name_your_rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) + name_your_http_cam: # <- for other streams - http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio - - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) + - "ffmpeg:name_your_http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) cameras: - rtsp_cam: + name_your_rtsp_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_rtsp_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - detect - audio # <- only necessary if audio detection is enabled - http_cam: + name_your_http_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_http_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record @@ -88,44 +92,44 @@ Two connections are made to the camera. 
One for the sub stream, one for the rest ```yaml go2rtc: streams: - rtsp_cam: + name_your_rtsp_cam: - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus - rtsp_cam_sub: + - "ffmpeg:name_your_rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus + name_your_rtsp_cam_sub: - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus - http_cam: + - "ffmpeg:name_your_rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus + name_your_http_cam: - http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to opus - http_cam_sub: + - "ffmpeg:name_your_http_cam#audio=opus" # <- copy of the stream which transcodes audio to opus + name_your_http_cam_sub: - http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=user&password=password # <- stream which supports video & aac audio. 
This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:http_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus + - "ffmpeg:name_your_http_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus cameras: - rtsp_cam: + name_your_rtsp_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_rtsp_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - - path: rtsp://127.0.0.1:8554/rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream + - path: rtsp://127.0.0.1:8554/name_your_rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream input_args: preset-rtsp-restream roles: - audio # <- only necessary if audio detection is enabled - detect - http_cam: + name_your_http_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_http_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - - path: rtsp://127.0.0.1:8554/http_cam_sub # <--- the name here must match the name of the camera_sub in restream + - path: rtsp://127.0.0.1:8554/name_your_http_cam_sub # <--- the name here must match the name of the camera_sub in restream input_args: preset-rtsp-restream roles: - audio # <- only necessary if audio detection is enabled @@ -134,7 +138,7 @@ cameras: ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. 
An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: NOTE: The output will need to be passed with two curly braces `{{output}}` diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index f8e463605..daca1786a 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -56,3 +56,27 @@ camera: ``` Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. You will get events for person objects that enter anywhere in the yard, and events for cars only if they enter the street. + +### Zone Inertia + +Sometimes an objects bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames. This value can be configured: + +```yaml +camera: + zones: + front_yard: + inertia: 3 + objects: + - person +``` + +There may also be cases where you expect an object to quickly enter and exit a zone, like when a car is pulling into the driveway, and you may want to have the object be considered present in the zone immediately: + +```yaml +camera: + zones: + driveway_entrance: + inertia: 1 + objects: + - car +``` diff --git a/docs/docs/development/contributing-boards.md b/docs/docs/development/contributing-boards.md new file mode 100644 index 000000000..49a65722d --- /dev/null +++ b/docs/docs/development/contributing-boards.md @@ -0,0 +1,94 @@ +--- +id: contributing-boards +title: Community Supported Boards +--- + +## About Community Supported Boards + +There are many SBCs (small board computers) that have a passionate community behind them, Jetson Nano for example. 
These SBCs often have dedicated hardware that can greatly accelerate Frigate's AI and video workloads, but this hardware requires very specific frameworks for interfacing with it. + +This means it would be very difficult for Frigate's maintainers to support these different boards especially given the relatively low userbase. + +The community support boards framework allows a user in the community to be the codeowner to add support for an SBC or other detector by providing the code, maintenance, and user support. + +## Getting Started + +1. Follow the steps from [the main contributing docs](/development/contributing.md). +2. Create a new build type under `docker/` +3. Get build working as expected, all board-specific changes should be done inside of the board specific docker file. + +## Required Structure + +Each board will have different build requirements, run on different architectures, etc. however there are set of files that all boards will need. + +### Bake File .hcl + +The `board.hcl` file is what allows the community boards build to be built using the main build as a cache. This enables a clean base and quicker build times. For more information on the format and options available in the Bake file, [see the official Buildx Bake docs](https://docs.docker.com/build/bake/reference/) + +### Board Make File + +The `board.mk` file is what allows automated and configurable Make targets to be included in the main Make file. Below is the general format for this file: + +```Makefile +BOARDS += board # Replace `board` with the board suffix ex: rpi + +local-rpi: version + docker buildx bake --load --file=docker/board/board.hcl --set board.tags=frigate:latest-board bake-target # Replace `board` with the board suffix ex: rpi. 
Bake target is the target in the board.hcl file ex: board + +build-rpi: version + docker buildx bake --file=docker/board/board.hcl --set board.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-board bake-target # Replace `board` with the board suffix ex: rpi. Bake target is the target in the board.hcl file ex: board + +push-rpi: build-rpi + docker buildx bake --push --file=docker/board/board.hcl --set board.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-board bake-target # Replace `board` with the board suffix ex: rpi. Bake target is the target in the board.hcl file ex: board +``` + +### Dockerfile + +The `Dockerfile` is what orchestrates the build, this will vary greatly depending on the board but some parts are required for things to work. Below are the required parts of the Dockerfile: + +```Dockerfile +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +# All board-specific work should be done with `deps` as the base +FROM deps AS board-deps + +# do stuff specific +# to the board + +# set workdir +WORKDIR /opt/frigate/ + +# copies base files from the main frigate build +COPY --from=rootfs / / +``` + +## Other Required Changes + +### CI/CD + +The images for each board will be built for each Frigate release, this is done in the `.github/workflows/ci.yml` file. The board build workflow will need to be added here. 
+ +```yml + - name: Build and push board build + uses: docker/bake-action@v3 + with: + push: true + targets: board # this is the target in the board.hcl file + files: docker/board/board.hcl # this should be updated with the actual board type + # the tags should be updated with the actual board types as well + # the community board builds should never push to cache, but it can pull from cache + set: | + board.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-board + *.cache-from=type=gha +``` + +### Code Owner File + +The `CODEOWNERS` file should be updated to include the `docker/board` along with `@user` for each user that is a code owner of this board + +# Docs + +At a minimum the `installation`, `object_detectors`, `hardware_acceleration`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board. diff --git a/docs/docs/development/contributing.md b/docs/docs/development/contributing.md index 0955af56a..bc08afbc9 100644 --- a/docs/docs/development/contributing.md +++ b/docs/docs/development/contributing.md @@ -1,6 +1,6 @@ --- id: contributing -title: Contributing +title: Contributing To The Main Code Base --- ## Getting the source @@ -68,10 +68,6 @@ cameras: input_args: -re -stream_loop -1 -fflags +genpts roles: - detect - detect: - height: 1080 - width: 1920 - fps: 5 ``` These input args tell ffmpeg to read the mp4 file in an infinite loop. You can use any valid ffmpeg input here. @@ -99,18 +95,24 @@ The following commands are used inside the container to ensure hardware accelera **Raspberry Pi (64bit)** -This should show <50% CPU in top, and ~80% CPU without `-c:v h264_v4l2m2m`. +This should show less than 50% CPU in top, and ~80% CPU without `-c:v h264_v4l2m2m`. 
```shell ffmpeg -c:v h264_v4l2m2m -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null ``` -**NVIDIA** +**NVIDIA GPU** ```shell ffmpeg -c:v h264_cuvid -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null ``` +**NVIDIA Jetson** + +```shell +ffmpeg -c:v h264_nvmpi -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null +``` + **VAAPI** ```shell @@ -129,7 +131,7 @@ ffmpeg -c:v h264_qsv -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/ - [Frigate source code](#frigate-core-web-and-docs) - All [core](#core) prerequisites _or_ another running Frigate instance locally available -- Node.js 16 +- Node.js 20 ### Making changes @@ -153,10 +155,6 @@ cd web && npm install cd web && npm run dev ``` -#### 3a. Run the development server against a non-local instance - -To run the development server against a non-local instance, you will need to modify the API_HOST default return in `web/src/env.js`. - #### 4. Making changes The Web UI is built using [Vite](https://vitejs.dev/), [Preact](https://preactjs.com), and [Tailwind CSS](https://tailwindcss.com). @@ -185,7 +183,7 @@ npm run test ### Prerequisites - [Frigate source code](#frigate-core-web-and-docs) -- Node.js 16 +- Node.js 20 ### Making changes @@ -203,7 +201,7 @@ npm run start This command starts a local development server and open up a browser window. Most changes are reflected live without having to restart the server. -The docs are built using [Docusaurus v2](https://v2.docusaurus.io). Please refer to the Docusaurus docs for more information on how to modify Frigate's documentation. +The docs are built using [Docusaurus v3](https://docusaurus.io). Please refer to the Docusaurus docs for more information on how to modify Frigate's documentation. #### 3. 
Build (optional) diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index 5daf8fe3b..0df4b2349 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -9,7 +9,7 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions; especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or less configuration options. -Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-410520-possibly-others). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data. +Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data. Here are some of the camera's I recommend: @@ -31,7 +31,7 @@ My current favorite is the Minisforum GK41 because of the dual NICs that allow y ## Detectors -A detector is a device which is optimized for running inferences efficiently to detect objects. 
Using a recommended detector means there will be less latency between detections and more detections can be run per second. Frigate is designed around the epectation that a detector is used to achieve very low inference speeds. Offloading TensorFlow to a detector is an order of magnitude faster and will reduce your CPU load dramatically. As of 0.12, Frigate supports a handful of different detector types with varying inference speeds and performance. +A detector is a device which is optimized for running inferences efficiently to detect objects. Using a recommended detector means there will be less latency between detections and more detections can be run per second. Frigate is designed around the expectation that a detector is used to achieve very low inference speeds. Offloading TensorFlow to a detector is an order of magnitude faster and will reduce your CPU load dramatically. As of 0.12, Frigate supports a handful of different detector types with varying inference speeds and performance. ### Google Coral TPU @@ -70,7 +70,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known | Intel i5 1135G7 | 10 - 15 ms | | | Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms | -### TensorRT +### TensorRT - Nvidia GPU The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector). @@ -87,6 +87,24 @@ Inference speeds will vary greatly depending on the GPU and the model used. | Quadro P400 2GB | 20 - 25 ms | | Quadro P2000 | ~ 12 ms | +### Community Supported: + +#### Nvidia Jetson + +Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. 
It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). + +Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time. + +#### Rockchip SoC + +Frigate supports SBCs with the following Rockchip SoCs: +- RK3566/RK3568 +- RK3588/RK3588S +- RV1103/RV1106 +- RK3562 + +Using the yolov8n model and an Orange Pi 5 Plus with RK3588 SoC inference speeds vary between 20 - 25 ms. + ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version) This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity. diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index 1397f8741..93b2cfe9c 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -25,8 +25,8 @@ Frigate uses the following locations for read/write operations in the container. - `/media/frigate/clips`: Used for snapshot storage. In the future, it will likely be renamed from `clips` to `snapshots`. The file structure here cannot be modified and isn't intended to be browsed or managed manually. - `/media/frigate/recordings`: Internal system storage for recording segments. The file structure here cannot be modified and isn't intended to be browsed or managed manually. 
- `/media/frigate/exports`: Storage for clips and timelapses that have been exported via the WebUI or API. -- `/tmp/cache`: Cache location for recording segments. Initial recordings are written here before being checked and converted to mp4 and moved to the recordings folder. -- `/dev/shm`: It is not recommended to modify this directory or map it with docker. This is the location for raw decoded frames in shared memory and it's size is impacted by the `shm-size` calculations below. +- `/tmp/cache`: Cache location for recording segments. Initial recordings are written here before being checked and converted to mp4 and moved to the recordings folder. Segments generated via the `clip.mp4` endpoints are also concatenated and processed here. It is recommended to use a [`tmpfs`](https://docs.docker.com/storage/tmpfs/) mount for this. +- `/dev/shm`: Internal cache for raw decoded frames in shared memory. It is not recommended to modify this directory or map it with docker. The minimum size is impacted by the `shm-size` calculations below. #### Common docker compose storage configurations @@ -51,7 +51,7 @@ services: Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**. -The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size. +The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose). 
The Frigate container also stores logs in shm, which can take up to **30MB**, so make sure to take this into account in your math as well. @@ -72,7 +72,6 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 9 + 270480) / 1048576) The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration. - ### Raspberry Pi 3/4 By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options). @@ -81,7 +80,7 @@ Additionally, the USB Coral draws a considerable amount of power. If using any o ## Docker -Running in Docker with compose is the recommended install method: +Running in Docker with compose is the recommended install method. 
```yaml version: "3.9" @@ -134,12 +133,27 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -## Home Assistant Operating System (HassOS) +The official docker image tags for the current stable version are: + +- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64 +- `stable-standard-arm64` - Standard Frigate build for arm64 +- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU + +The community supported docker image tags for the current stable version are: + +- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5 +- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6 +- `stable-rk` - Frigate build for SBCs with Rockchip SoC + +## Home Assistant Addon :::caution +As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported. + There are important limitations in Home Assistant Operating System to be aware of: -- Utilizing external storage for recordings or snapshots requires [modifying udev rules manually](https://community.home-assistant.io/t/solved-mount-usb-drive-in-hassio-to-be-used-on-the-media-folder-with-udev-customization/258406/46). + +- Separate local storage for media is not yet supported by Home Assistant - AMD GPUs are not supported because HA OS does not include the mesa driver. - Nvidia GPUs are not supported because addons do not support the nvidia runtime. @@ -147,7 +161,7 @@ There are important limitations in Home Assistant Operating System to be aware o :::tip -If possible, it is recommended to run Frigate standalone in Docker and use [Frigate's Proxy Addon](https://github.com/blakeblackshear/frigate-hass-addons/blob/main/frigate_proxy/README.md). +See [the network storage guide](/guides/ha_network_storage.md) for instructions to setup network storage for frigate. 
::: @@ -170,32 +184,6 @@ There are several versions of the addon available: | Frigate NVR Beta | Beta release with protection mode on | | Frigate NVR Beta (Full Access) | Beta release with the option to disable protection mode | -## Home Assistant Supervised - -:::caution - -There are important limitations in Home Assistant Supervised to be aware of: -- Nvidia GPUs are not supported because addons do not support the nvidia runtime. - -::: - -:::tip - -If possible, it is recommended to run Frigate standalone in Docker and use [Frigate's Proxy Addon](https://github.com/blakeblackshear/frigate-hass-addons/blob/main/frigate_proxy/README.md). - -::: - -When running Home Assistant with the [Supervised install method](https://github.com/home-assistant/supervised-installer), you can get the benefit of running the Addon along with the ability to customize the storage used by Frigate. - -In order to customize the storage location for Frigate, simply use `fstab` to mount the drive you want at `/usr/share/hassio/media`. Here is an example fstab entry: - -```shell -UUID=1a65fec6-c25f-404a-b3d2-1f2fcf6095c8 /media/data ext4 defaults 0 0 -/media/data/homeassistant/media /usr/share/hassio/media none bind 0 0 -``` - -Then follow the instructions listed for [Home Assistant Operating System](#home-assistant-operating-system-hassos). - ## Kubernetes Use the [helm chart](https://github.com/blakeblackshear/blakeshome-charts/tree/master/charts/frigate). @@ -209,15 +197,16 @@ To install make sure you have the [community app plugin here](https://forums.unr It is recommended to run Frigate in LXC for maximum performance. See [this discussion](https://github.com/blakeblackshear/frigate/discussions/1111) for more information. -## ESX +## ESXi -For details on running Frigate under ESX, see details [here](https://github.com/blakeblackshear/frigate/issues/305). 
+For details on running Frigate using ESXi, please see the instructions [here](https://williamlam.com/2023/05/frigate-nvr-with-coral-tpu-igpu-passthrough-using-esxi-on-intel-nuc.html). + +If you're running Frigate on a rack mounted server and want to passthough the Google Coral, [read this.](https://github.com/blakeblackshear/frigate/issues/305) ## Synology NAS on DSM 7 These settings were tested on DSM 7.1.1-42962 Update 4 - **General:** The `Execute container using high privilege` option needs to be enabled in order to give the frigate container the elevated privileges it may need. @@ -226,14 +215,12 @@ The `Enable auto-restart` option can be enabled if you want the container to aut ![image](https://user-images.githubusercontent.com/4516296/232586790-0b659a82-561d-4bc5-899b-0f5b39c6b11d.png) - **Advanced Settings:** If you want to use the password template feature, you should add the "FRIGATE_RTSP_PASSWORD" environment variable and set it to your preferred password under advanced settings. The rest of the environment variables should be left as default for now. ![image](https://user-images.githubusercontent.com/4516296/232587163-0eb662d4-5e28-4914-852f-9db1ec4b9c3d.png) - **Port Settings:** The network mode should be set to `bridge`. You need to map the default frigate container ports to your local Synology NAS ports that you want to use to access Frigate. @@ -242,7 +229,6 @@ There may be other services running on your NAS that are using the same ports th ![image](https://user-images.githubusercontent.com/4516296/232582642-773c0e37-7ef5-4373-8ce3-41401b1626e6.png) - **Volume Settings:** You need to configure 2 paths: @@ -256,14 +242,15 @@ You need to configure 2 paths: These instructions were tested on a QNAP with an Intel J3455 CPU and 16G RAM, running QTS 4.5.4.2117. -QNAP has a graphic tool named Container Station to intall and manage docker containers. 
However, there are two limitations with Container Station that make it unsuitable to install Frigate: +QNAP has a graphic tool named Container Station to install and manage docker containers. However, there are two limitations with Container Station that make it unsuitable to install Frigate: 1. Container Station does not incorporate GitHub Container Registry (ghcr), which hosts Frigate docker image version 0.12.0 and above. -2. Container Station uses default 64 Mb shared memory size (shm-size), and does not have a mechanism to adjust it. Frigate requires a larger shm-size to be able to work properly with more than two high resolution cameras. +2. Container Station uses default 64 Mb shared memory size (shm-size), and does not have a mechanism to adjust it. Frigate requires a larger shm-size to be able to work properly with more than two high resolution cameras. -Because of above limitations, the installation has to be done from command line. Here are the steps: +Because of above limitations, the installation has to be done from command line. Here are the steps: **Preparation** + 1. Install Container Station from QNAP App Center if it is not installed. 2. Enable ssh on your QNAP (please do an Internet search on how to do this). 3. Prepare Frigate config file, name it `config.yml`. @@ -274,7 +261,8 @@ Because of above limitations, the installation has to be done from command line. **Installation** Run the following commands to install Frigate (using `stable` version as example): -```bash + +```shell # Download Frigate image docker pull ghcr.io/blakeblackshear/frigate:stable # Create directory to host Frigate config file on QNAP file system. @@ -315,6 +303,4 @@ docker run \ ghcr.io/blakeblackshear/frigate:stable ``` -Log into QNAP, open Container Station. Frigate docker container should be listed under 'Overview' and running. Visit Frigate Web UI by clicking Frigate docker, and then clicking the URL shown at the top of the detail page. 
- - +Log into QNAP, open Container Station. Frigate docker container should be listed under 'Overview' and running. Visit Frigate Web UI by clicking Frigate docker, and then clicking the URL shown at the top of the detail page. diff --git a/docs/docs/frigate/video_pipeline.md b/docs/docs/frigate/video_pipeline.md new file mode 100644 index 000000000..313e27ed5 --- /dev/null +++ b/docs/docs/frigate/video_pipeline.md @@ -0,0 +1,67 @@ +--- +id: video_pipeline +title: Video pipeline +--- + +Frigate uses a sophisticated video pipeline that starts with the camera feed and progressively applies transformations to it (e.g. decoding, motion detection, etc.). + +This guide provides an overview to help users understand some of the key Frigate concepts. + +## Overview + +At a high level, there are five processing steps that could be applied to a camera feed + +```mermaid +%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%% + +flowchart LR + Feed(Feed\nacquisition) --> Decode(Video\ndecoding) + Decode --> Motion(Motion\ndetection) + Motion --> Object(Object\ndetection) + Feed --> Recording(Recording\nand\nvisualization) + Motion --> Recording + Object --> Recording +``` + +As the diagram shows, all feeds first need to be acquired. Depending on the data source, it may be as simple as using FFmpeg to connect to an RTSP source via TCP or something more involved like connecting to an Apple Homekit camera using go2rtc. A single camera can produce a main (i.e. high resolution) and a sub (i.e. lower resolution) video feed. + +Typically, the sub-feed will be decoded to produce full-frame images. As part of this process, the resolution may be downscaled and an image sampling frequency may be imposed (e.g. keep 5 frames per second). + +These frames will then be compared over time to detect movement areas (a.k.a. motion boxes). These motion boxes are combined into motion regions and are analyzed by a machine learning model to detect known objects. 
Finally, the snapshot and recording retention config will decide what video clips and events should be saved. + +## Detailed view of the video pipeline + +The following diagram adds a lot more detail than the simple view explained before. The goal is to show the detailed data paths between the processing steps. + +```mermaid +%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%% + +flowchart TD + RecStore[(Recording\nstore)] + SnapStore[(Snapshot\nstore)] + + subgraph Acquisition + Cam["Camera"] -->|FFmpeg supported| Stream + Cam -->|"Other streaming\nprotocols"| go2rtc + go2rtc("go2rtc") --> Stream + Stream[Capture main and\nsub streams] --> |detect stream|Decode(Decode and\ndownscale) + end + subgraph Motion + Decode --> MotionM(Apply\nmotion masks) + MotionM --> MotionD(Motion\ndetection) + end + subgraph Detection + MotionD --> |motion regions| ObjectD(Object detection) + Decode --> ObjectD + ObjectD --> ObjectFilter(Apply object filters & zones) + ObjectFilter --> ObjectZ(Track objects) + end + Decode --> |decoded frames|Birdseye + MotionD --> |motion event|Birdseye + ObjectZ --> |object event|Birdseye + + MotionD --> |"video segments\n(retain motion)"|RecStore + ObjectZ --> |detection clip|RecStore + Stream -->|"video segments\n(retain all)"| RecStore + ObjectZ --> |detection snapshot|SnapStore +``` diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index 55adc48c7..1279f9950 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -3,14 +3,17 @@ id: configuring_go2rtc title: Configuring go2rtc --- +# Configuring go2rtc + Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. 
However, adding go2rtc to your configuration is required for the following features: - WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream -- RTSP (instead of RTMP) relay for use with Home Assistant or other consumers to reduce the number of connections to your camera streams +- Live stream support for cameras in Home Assistant Integration +- RTSP (instead of RTMP) relay for use with other consumers to reduce the number of connections to your camera streams # Setup a go2rtc stream -First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#module-streams), not just rtsp. +First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#module-streams), not just rtsp. ```yaml go2rtc: @@ -23,7 +26,7 @@ The easiest live view to get working is MSE. After adding this to the config, re ### What if my video doesn't play? -If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. 
If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration: +If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. 
Here is an example of a config that will re-encode the stream to H264 without hardware acceleration: ```yaml go2rtc: diff --git a/docs/docs/guides/false_positives.md b/docs/docs/guides/false_positives.md index 84d67a3b0..a77e9e9f3 100644 --- a/docs/docs/guides/false_positives.md +++ b/docs/docs/guides/false_positives.md @@ -3,11 +3,7 @@ id: false_positives title: Reducing false positives --- -Tune your object filters to adjust false positives: `min_area`, `max_area`, `min_ratio`, `max_ratio`, `min_score`, `threshold`. - -The `min_area` and `max_area` values are compared against the area (number of pixels) from a given detected object. If the area is outside this range, the object will be ignored as a false positive. This allows objects that must be too small or too large to be ignored. - -Similarly, the `min_ratio` and `max_ratio` values are compared against a given detected object's width/height ratio (in pixels). If the ratio is outside this range, the object will be ignored as a false positive. This allows objects that are proportionally too short-and-wide (higher ratio) or too tall-and-narrow (smaller ratio) to be ignored. +## Object Scores For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85: @@ -21,3 +17,33 @@ For object filters in your configuration, any single detection below `min_score` | 6 | 0.95 | 0.7, 0.85, 0.95, 0.90, 0.88, 0.95 | 0.89 | Yes | In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example. 
+
+### Minimum Score
+
+Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid events to be lost or disjointed.
+
+### Threshold
+
+`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold`, the object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create an event. If `threshold` is too high then true positive events may be missed due to the object never scoring high enough.
+
+## Object Shape
+
+False positives can also be reduced by filtering a detection based on its shape.
+
+### Object Area
+
+`min_area` and `max_area` filter on the area of an object's bounding box in pixels and can be used to reduce false positives that are outside the range of expected sizes. For example, when a leaf is detected as a dog or when a large tree is detected as a person, these can be reduced by adding a `min_area` / `max_area` filter. The recordings timeline can be used to determine the area of the bounding box in that frame by selecting a timeline item then mousing over or tapping the red box.
+
+### Object Proportions
+
+`min_ratio` and `max_ratio` filter on the ratio of width / height of an object's bounding box and can be used to reduce false positives. For example, if a false positive is detected as very tall for a dog which is often wider, a `min_ratio` filter can be used to filter out these false positives.
+ +## Other Tools + +### Zones + +[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create events for objects that enter the zone. + +### Object Masks + +[Object Filter Masks](/configuration/masks) are a last resort but can be useful when false positives are in the relatively same place but can not be filtered due to their size or shape. diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index cb67c59b4..26d8eef26 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -3,7 +3,145 @@ id: getting_started title: Getting started --- -This guide walks through the steps to build a configuration file for Frigate. It assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution. +# Getting Started + +## Setting up hardware + +This section guides you through setting up a server with Debian Bookworm and Docker. If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below. + +### Install Debian 12 (Bookworm) + +There are many guides on how to install Debian Server, so this will be an abbreviated guide. Connect a temporary monitor and keyboard to your device so you can install a minimal server without a desktop environment. + +#### Prepare installation media + +1. Download the small installation image from the [Debian website](https://www.debian.org/distrib/netinst) +1. Flash the ISO to a USB device (popular tool is [balena Etcher](https://etcher.balena.io/)) +1. Boot your device from USB + +#### Install and setup Debian for remote access + +1. 
Ensure your device is connected to the network so updates and software options can be installed +1. Choose the non-graphical install option if you don't have a mouse connected, but either install method works fine +1. You will be prompted to set the root user password and create a user with a password +1. Install the minimum software. Fewer dependencies result in less maintenance. + 1. Uncheck "Debian desktop environment" and "GNOME" + 1. Check "SSH server" + 1. Keep "standard system utilities" checked +1. After reboot, login as root at the command prompt to add user to sudoers + 1. Install sudo + ```bash + apt update && apt install -y sudo + ``` + 1. Add the user you created to the sudo group (change `blake` to your own user) + ```bash + usermod -aG sudo blake + ``` +1. Shutdown by running `poweroff` + +At this point, you can install the device in a permanent location. The remaining steps can be performed via SSH from another device. If you don't have an SSH client, you can install one of the options listed in the [Visual Studio Code documentation](https://code.visualstudio.com/docs/remote/troubleshooting#_installing-a-supported-ssh-client). + +#### Finish setup via SSH + +1. Connect via SSH and login with your non-root user created during install +1. Setup passwordless sudo so you don't have to type your password for each sudo command (change `blake` in the command below to your user) + + ```bash + echo 'blake ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/user + ``` + +1. Logout and login again to activate passwordless sudo +1. Setup automatic security updates for the OS (optional) + 1. Ensure everything is up to date by running + ```bash + sudo apt update && sudo apt upgrade -y + ``` + 1. 
Install unattended upgrades + ```bash + sudo apt install -y unattended-upgrades + echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | sudo debconf-set-selections + sudo dpkg-reconfigure -f noninteractive unattended-upgrades + ``` + +Now you have a minimal Debian server that requires very little maintenance. + +### Install Docker + +1. Install Docker Engine (not Docker Desktop) using the [official docs](https://docs.docker.com/engine/install/debian/) + 1. Specifically, follow the steps in the [Install using the apt repository](https://docs.docker.com/engine/install/debian/#install-using-the-repository) section +2. Add your user to the docker group as described in the [Linux postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/) + +## Installing Frigate + +This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate). + +### Setup directories + +Frigate requires a valid config file to start. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation. + +``` +. +├── docker-compose.yml +├── config/ +│ └── config.yml +└── storage/ +``` + +This will create the above structure: + +```bash +mkdir storage config && touch docker-compose.yml config/config.yml +``` + +If you are setting up Frigate on a Linux device via SSH, you can use [nano](https://itsfoss.com/nano-editor-guide/) to edit the following files. If you prefer to edit remote files with a full editor instead of a terminal, I recommend using [Visual Studio Code](https://code.visualstudio.com/) with the [Remote SSH extension](https://code.visualstudio.com/docs/remote/ssh-tutorial). + +:::note + +This `docker-compose.yml` file is just a starter for amd64 devices. 
You will need to customize it for your setup as detailed in the [Installation docs](/frigate/installation#docker). + +::: +`docker-compose.yml` + +```yaml +version: "3.9" +services: + frigate: + container_name: frigate + restart: unless-stopped + image: ghcr.io/blakeblackshear/frigate:stable + volumes: + - ./config:/config + - ./storage:/media/frigate + - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear + target: /tmp/cache + tmpfs: + size: 1000000000 + ports: + - "5000:5000" + - "8554:8554" # RTSP feeds +``` + +`config.yml` + +```yaml +mqtt: + enabled: False + +cameras: + dummy_camera: # <--- this will be changed to your actual camera later + enabled: False + ffmpeg: + inputs: + - path: rtsp://127.0.0.1:554/rtsp + roles: + - detect +``` + +Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. Frigate should now be accessible at `server_ip:5000` and you can finish the configuration using the built-in configuration editor. + +## Configuring Frigate + +This section assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution. 
### Step 1: Add a detect stream @@ -15,6 +153,7 @@ mqtt: cameras: name_of_your_camera: # <------ Name the camera + enabled: True ffmpeg: inputs: - path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection @@ -22,8 +161,6 @@ cameras: - detect detect: enabled: False # <---- disable detection until you have a working camera feed - width: 1280 # <---- update for your camera's resolution - height: 720 # <---- update for your camera's resolution ``` ### Step 2: Start Frigate @@ -38,7 +175,21 @@ FFmpeg arguments for other types of cameras can be found [here](../configuration Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware. -Here is an example configuration with hardware acceleration configured for Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md): +Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md): + +`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) + +```yaml +version: "3.9" +services: + frigate: + ... + devices: + - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware + ... +``` + +`config.yml` ```yaml mqtt: ... @@ -55,6 +206,19 @@ cameras: By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config. +`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes) + +```yaml +version: "3.9" +services: + frigate: + ... 
+ devices: + - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions + - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux + ... +``` + ```yaml mqtt: ... @@ -105,9 +269,6 @@ cameras: - path: rtsp://10.0.10.10:554/rtsp roles: - detect - detect: - width: 1280 - height: 720 motion: mask: - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432 diff --git a/docs/docs/guides/ha_network_storage.md b/docs/docs/guides/ha_network_storage.md new file mode 100644 index 000000000..498dd7d0c --- /dev/null +++ b/docs/docs/guides/ha_network_storage.md @@ -0,0 +1,45 @@ +--- +id: ha_network_storage +title: HA Network Storage +--- + +As of HomeAsisstant Core 2023.6, Network Mounted Storage is supported for addons. + +## Setting Up Remote Storage For Frigate + +### Prerequisites + +- HA Core 2023.6 or newer is installed +- Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for superivsed install) + +### Initial Setup + +1. Stop the Frigate addon +2. Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding: +```yaml +database: + path: /config/frigate.db +``` + +### Move current data + +Keeping the current data is optional, but the data will need to be moved regardless so the share can be created successfully. + +#### If you want to keep the current data + +1. Move the frigate.db, frigate.db-shm, frigate.db-wal files to the /config directory +2. Rename the /media/frigate folder to /media/frigate_tmp + +#### If you don't want to keep the current data + +1. Delete the /media/frigate folder and all of its contents + +### Create the media share + +1. Go to **Settings -> System -> Storage -> Add Network Storage** +2. Name the share `frigate` (this is required) +3. Choose type `media` +4. Fill out the additional required info for your particular NAS +5. Connect +6. 
Move files from `/media/frigate_tmp` to `/media/frigate` if they were kept in previous step +7. Start the Frigate addon diff --git a/docs/docs/integrations/api.md b/docs/docs/integrations/api.md index f36f33770..20877bb6f 100644 --- a/docs/docs/integrations/api.md +++ b/docs/docs/integrations/api.md @@ -155,18 +155,25 @@ Version info Events from the database. Accepts the following query string parameters: -| param | Type | Description | -| -------------------- | ---- | --------------------------------------------- | -| `before` | int | Epoch time | -| `after` | int | Epoch time | -| `cameras` | str | , separated list of cameras | -| `labels` | str | , separated list of labels | -| `zones` | str | , separated list of zones | -| `limit` | int | Limit the number of events returned | -| `has_snapshot` | int | Filter to events that have snapshots (0 or 1) | -| `has_clip` | int | Filter to events that have clips (0 or 1) | -| `include_thumbnails` | int | Include thumbnails in the response (0 or 1) | -| `in_progress` | int | Limit to events in progress (0 or 1) | +| param | Type | Description | +| -------------------- | ----- | ----------------------------------------------------- | +| `before` | int | Epoch time | +| `after` | int | Epoch time | +| `cameras` | str | , separated list of cameras | +| `labels` | str | , separated list of labels | +| `zones` | str | , separated list of zones | +| `limit` | int | Limit the number of events returned | +| `has_snapshot` | int | Filter to events that have snapshots (0 or 1) | +| `has_clip` | int | Filter to events that have clips (0 or 1) | +| `include_thumbnails` | int | Include thumbnails in the response (0 or 1) | +| `in_progress` | int | Limit to events in progress (0 or 1) | +| `time_range` | str | Time range in format after,before (00:00,24:00) | +| `timezone` | str | Timezone to use for time range | +| `min_score` | float | Minimum score of the event | +| `max_score` | float | Maximum score of the event | +| 
`is_submitted` | int | Filter events that are submitted to Frigate+ (0 or 1) | +| `min_length` | float | Minimum length of the event | +| `max_length` | float | Maximum length of the event | ### `GET /api/timeline` @@ -217,7 +224,8 @@ Sub labels must be 100 characters or shorter. ```json { - "subLabel": "some_string" + "subLabel": "some_string", + "subLabelScore": 0.79, } ``` @@ -251,10 +259,19 @@ Accepts the following query string parameters, but they are only applied when an Returns the snapshot image from the latest event for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type. -### `GET /api//recording//snapshot.png` +### `GET /api//recordings//snapshot.png` Returns the snapshot image from the specific point in that cameras recordings. +### `GET /api//grid.jpg` + +Returns the latest camera image with the regions grid overlaid. + +| param | Type | Description | +| ------------ | ----- | ------------------------------------------------------------------------------------------ | +| `color` | str | The color of the grid (red,green,blue,black,white). Defaults to "green". | +| `font_scale` | float | Font scale. Can be used to increase font size on high resolution cameras. Defaults to 0.5. | + ### `GET /clips/-.jpg` JPG snapshot for the given camera and event id. @@ -285,6 +302,14 @@ It is also possible to export this recording as a timelapse. } ``` +### `DELETE /api/export/` + +Delete an export from disk. + +### `PATCH /api/export//` + +Renames an export. + ### `GET /api//recordings/summary` Hourly summary of recordings data for a camera. @@ -312,13 +337,19 @@ Get PTZ info for the camera. ### `POST /api/events//