Merge branch 'dev' into object-speed
@@ -2,6 +2,7 @@ aarch
 absdiff
 airockchip
 Alloc
+alpr
 Amcrest
 amdgpu
 analyzeduration
@@ -61,6 +62,7 @@ dsize
 dtype
 ECONNRESET
 edgetpu
+facenet
 fastapi
 faststart
 fflags
@@ -114,6 +116,8 @@ itemsize
 Jellyfin
 jetson
 jetsons
+jina
+jinaai
 joserfc
 jsmpeg
 jsonify
@@ -187,6 +191,7 @@ openai
 opencv
 openvino
 OWASP
+paddleocr
 paho
 passwordless
 popleft

--- a/.github/actions/setup/action.yml
+++ b/.github/actions/setup/action.yml
@@ -33,9 +33,9 @@ runs:
       with:
         string: ${{ github.repository }}
     - name: Set up QEMU
-      uses: docker/setup-qemu-action@v2
+      uses: docker/setup-qemu-action@v3
     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v2
+      uses: docker/setup-buildx-action@v3
     - name: Log in to the Container registry
      uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
      with:

--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,7 +7,7 @@ on:
       - dev
       - master
     paths-ignore:
-      - 'docs/**'
+      - "docs/**"

 # only run the latest commit to avoid cache overwrites
 concurrency:
@@ -19,11 +19,13 @@ env:

 jobs:
   amd64_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: AMD64 Build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
@@ -40,11 +42,13 @@ jobs:
           tags: ${{ steps.setup.outputs.image-name }}-amd64
           cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
   arm64_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
    name: ARM Build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
@@ -62,8 +66,9 @@ jobs:
             ${{ steps.setup.outputs.image-name }}-standard-arm64
           cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
       - name: Build and push RPi build
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: rpi
           files: docker/rpi/rpi.hcl
@@ -71,21 +76,15 @@ jobs:
             rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-      - name: Build and push Rockchip build
-        uses: docker/bake-action@v3
-        with:
-          push: true
-          targets: rk
-          files: docker/rockchip/rk.hcl
-          set: |
-            rk.tags=${{ steps.setup.outputs.image-name }}-rk
-            *.cache-from=type=gha
   jetson_jp4_build:
-    runs-on: ubuntu-latest
+    if: false
+    runs-on: ubuntu-22.04
     name: Jetson Jetpack 4
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
@@ -97,8 +96,9 @@ jobs:
           BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
           SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
           TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: tensorrt
           files: docker/tensorrt/trt.hcl
@@ -107,11 +107,14 @@ jobs:
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max
   jetson_jp5_build:
-    runs-on: ubuntu-latest
+    if: false
+    runs-on: ubuntu-22.04
     name: Jetson Jetpack 5
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
@@ -123,8 +126,9 @@ jobs:
           BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
           SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
           TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: tensorrt
           files: docker/tensorrt/trt.hcl
@@ -133,13 +137,15 @@ jobs:
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
   amd64_extra_builds:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: AMD64 Extra Build
     needs:
       - amd64_build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
@@ -148,8 +154,9 @@ jobs:
       - name: Build and push TensorRT (x86 GPU)
         env:
           COMPUTE_LEVEL: "50 60 70 80 90"
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: tensorrt
           files: docker/tensorrt/trt.hcl
@@ -157,22 +164,38 @@ jobs:
             tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
+      - name: AMD/ROCm general build
+        env:
+          AMDGPU: gfx
+          HSA_OVERRIDE: 0
+        uses: docker/bake-action@v6
+        with:
+          source: .
+          push: true
+          targets: rocm
+          files: docker/rocm/rocm.hcl
+          set: |
+            rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
+            *.cache-from=type=gha
   arm64_extra_builds:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: ARM Extra Build
     needs:
       - arm64_build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Rockchip build
-        uses: docker/bake-action@v3
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: rk
           files: docker/rockchip/rk.hcl
@@ -180,7 +203,7 @@ jobs:
             rk.tags=${{ steps.setup.outputs.image-name }}-rk
             *.cache-from=type=gha
   combined_extra_builds:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Combined Extra Builds
     needs:
       - amd64_build
@@ -188,14 +211,17 @@ jobs:
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Hailo-8l build
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: h8l
           files: docker/hailo8l/h8l.hcl
@@ -203,22 +229,10 @@ jobs:
             h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
-      - name: AMD/ROCm general build
-        env:
-          AMDGPU: gfx
-          HSA_OVERRIDE: 0
-        uses: docker/bake-action@v3
-        with:
-          push: true
-          targets: rocm
-          files: docker/rocm/rocm.hcl
-          set: |
-            rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
-            *.cache-from=type=gha
   # The majority of users running arm64 are rpi users, so the rpi
   # build should be the primary arm64 image
   assemble_default_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Assemble and push default build
     needs:
       - amd64_build

--- a/.github/workflows/dependabot-auto-merge.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-name: dependabot-auto-merge
-on: pull_request
-
-permissions:
-  contents: write
-
-jobs:
-  dependabot-auto-merge:
-    runs-on: ubuntu-latest
-    if: github.actor == 'dependabot[bot]'
-    steps:
-      - name: Get Dependabot metadata
-        id: metadata
-        uses: dependabot/fetch-metadata@v2
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Enable auto-merge for Dependabot PRs
-        if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')
-        run: |
-          gh pr review --approve "$PR_URL"
-          gh pr merge --auto --squash "$PR_URL"
-        env:
-          PR_URL: ${{ github.event.pull_request.html_url }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -3,10 +3,10 @@ name: On pull request
 on:
   pull_request:
     paths-ignore:
-      - 'docs/**'
+      - "docs/**"

 env:
-  DEFAULT_PYTHON: 3.9
+  DEFAULT_PYTHON: 3.11

 jobs:
   build_devcontainer:
@@ -19,6 +19,8 @@ jobs:
       DOCKER_BUILDKIT: "1"
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 16.x
@@ -38,6 +40,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 16.x
@@ -52,6 +56,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 20.x
@@ -67,8 +73,10 @@ jobs:
     steps:
       - name: Check out the repository
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.1.0
+        uses: actions/setup-python@v5.3.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
       - name: Install requirements
@@ -88,6 +96,8 @@ jobs:
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 16.x

--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -11,6 +11,8 @@ jobs:

     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - id: lowercaseRepo
         uses: ASzc/change-string-case-action@v6
         with:
@@ -22,10 +24,13 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Create tag variables
+        env:
+          TAG: ${{ github.ref_name }}
+          LOWERCASE_REPO: ${{ steps.lowercaseRepo.outputs.lowercase }}
         run: |
-          BUILD_TYPE=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
+          BUILD_TYPE=$([[ "${TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
           echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_ENV
-          echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV
+          echo "BASE=ghcr.io/${LOWERCASE_REPO}" >> $GITHUB_ENV
           echo "BUILD_TAG=${GITHUB_SHA::7}" >> $GITHUB_ENV
           echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV
       - name: Tag and push the main image

--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -23,7 +23,9 @@ jobs:
           exempt-pr-labels: "pinned,security,dependencies"
           operations-per-run: 120
       - name: Print outputs
-        run: echo ${{ join(steps.stale.outputs.*, ',') }}
+        env:
+          STALE_OUTPUT: ${{ join(steps.stale.outputs.*, ',') }}
+        run: echo "$STALE_OUTPUT"

   # clean_ghcr:
   #   name: Delete outdated dev container images
@@ -38,4 +40,3 @@ jobs:
   #         account-type: personal
   #         token: ${{ secrets.GITHUB_TOKEN }}
   #         token-type: github-token
-

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 default_target: local

 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.15.0
+VERSION = 0.16.0
 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
 GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
 BOARDS= #Initialized empty

@@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
     object_detector.cleanup()
     print(f"{id} - Processed for {duration:.2f} seconds.")
     print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
-    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
+    print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")


 ######

@@ -5,6 +5,7 @@ ARG DEBIAN_FRONTEND=noninteractive
 # Build Python wheels
 FROM wheels AS h8l-wheels

+RUN python3 -m pip config set global.break-system-packages true
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt

@@ -30,6 +31,7 @@ COPY --from=hailort /hailo-wheels /deps/hailo-wheels
 COPY --from=hailort /rootfs/ /

 # Install the wheels
+RUN python3 -m pip config set global.break-system-packages true
 RUN pip3 install -U /deps/h8l-wheels/*.whl
 RUN pip3 install -U /deps/hailo-wheels/*.whl

@@ -2,7 +2,7 @@

 set -euxo pipefail

-hailo_version="4.19.0"
+hailo_version="4.20.0"

 if [[ "${TARGETARCH}" == "amd64" ]]; then
     arch="x86_64"
@@ -15,5 +15,5 @@ wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_ver

 mkdir -p /hailo-wheels

-wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"
+wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"

@@ -1,12 +1,12 @@
-appdirs==1.4.4
-argcomplete==2.0.0
-contextlib2==0.6.0.post1
-distlib==0.3.6
-filelock==3.8.0
-future==0.18.2
-importlib-metadata==5.1.0
-importlib-resources==5.1.2
-netaddr==0.8.0
-netifaces==0.10.9
-verboselogs==1.7
-virtualenv==20.17.0
+appdirs==1.4.*
+argcomplete==2.0.*
+contextlib2==0.6.*
+distlib==0.3.*
+filelock==3.8.*
+future==0.18.*
+importlib-metadata==5.1.*
+importlib-resources==5.1.*
+netaddr==0.8.*
+netifaces==0.10.*
+verboselogs==1.7.*
+virtualenv==20.17.*

@@ -4,6 +4,7 @@
 sudo apt-get update
 sudo apt-get install -y build-essential cmake git wget

+hailo_version="4.20.0"
 arch=$(uname -m)

 if [[ $arch == "x86_64" ]]; then
@@ -13,7 +14,7 @@ else
 fi

 # Clone the HailoRT driver repository
-git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
+git clone --depth 1 --branch v${hailo_version} https://github.com/hailo-ai/hailort-drivers.git

 # Build and install the HailoRT driver
 cd hailort-drivers/linux/pcie

@@ -3,12 +3,12 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive

-ARG BASE_IMAGE=debian:11
-ARG SLIM_BASE=debian:11-slim
+ARG BASE_IMAGE=debian:12
+ARG SLIM_BASE=debian:12-slim

 FROM ${BASE_IMAGE} AS base

-FROM --platform=${BUILDPLATFORM} debian:11 AS base_host
+FROM --platform=${BUILDPLATFORM} debian:12 AS base_host

 FROM ${SLIM_BASE} AS slim-base

@@ -66,8 +66,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
 RUN apt-get -qq update \
     && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
     && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
-    && python3 get-pip.py "pip" \
-    && pip install -r /requirements-ov.txt
+    && python3 get-pip.py "pip" --break-system-packages \
+    && pip install --break-system-packages -r /requirements-ov.txt

 # Get OpenVino Model
 RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@@ -139,24 +139,17 @@ ARG TARGETARCH
 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
     && apt-get -qq install -y \
-    apt-transport-https \
-    gnupg \
-    wget \
-    # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
-    && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
-    gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
-    && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
-    tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
+    apt-transport-https wget \
     && apt-get -qq update \
     && apt-get -qq install -y \
-    python3.9 \
-    python3.9-dev \
+    python3 \
+    python3-dev \
     # opencv dependencies
     build-essential cmake git pkg-config libgtk-3-dev \
     libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
     libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
     gfortran openexr libatlas-base-dev libssl-dev\
-    libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
+    libtbbmalloc2 libtbb-dev libdc1394-dev libopenexr-dev \
     libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
     # sqlite3 dependencies
     tclsh \
@@ -164,14 +157,11 @@ RUN apt-get -qq update \
     gcc gfortran libopenblas-dev liblapack-dev && \
     rm -rf /var/lib/apt/lists/*

-# Ensure python3 defaults to python3.9
-RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
-
 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
-    && python3 get-pip.py "pip"
+    && python3 get-pip.py "pip" --break-system-packages

 COPY docker/main/requirements.txt /requirements.txt
-RUN pip3 install -r /requirements.txt
+RUN pip3 install -r /requirements.txt --break-system-packages

 # Build pysqlite3 from source
 COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
@@ -215,15 +205,14 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
 ENV OPENCV_FFMPEG_LOGLEVEL=8

 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
-ENV LIBAVFORMAT_VERSION_MAJOR=60

 # Install dependencies
 RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
     /deps/install_deps.sh

 RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
-    python3 -m pip install --upgrade pip && \
-    pip3 install -U /deps/wheels/*.whl
+    python3 -m pip install --upgrade pip --break-system-packages && \
+    pip3 install -U /deps/wheels/*.whl --break-system-packages

 COPY --from=deps-rootfs / /

@@ -270,7 +259,7 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*

 RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
-    pip3 install -r requirements-dev.txt
+    pip3 install -r requirements-dev.txt --break-system-packages

 HEALTHCHECK NONE

@@ -8,10 +8,16 @@ SECURE_TOKEN_MODULE_VERSION="1.5"
 SET_MISC_MODULE_VERSION="v0.33"
 NGX_DEVEL_KIT_VERSION="v0.3.3"

-cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
-sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
-apt-get update
+source /etc/os-release

+if [[ "$VERSION_ID" == "12" ]]; then
+    sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
+else
+    cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
+    sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
+fi
+
+apt-get update
 apt-get -yqq build-dep nginx

 apt-get -yqq install --no-install-recommends ca-certificates wget

@@ -4,7 +4,7 @@ from openvino.tools import mo
 ov_model = mo.convert_model(
     "/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb",
     compress_to_fp16=True,
-    transformations_config="/usr/local/lib/python3.9/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
+    transformations_config="/usr/local/lib/python3.11/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
     tensorflow_object_detection_api_pipeline_config="/models/ssdlite_mobilenet_v2_coco_2018_05_09/pipeline.config",
     reverse_input_channels=True,
 )

@@ -4,8 +4,15 @@ set -euxo pipefail

 SQLITE_VEC_VERSION="0.1.3"

-cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
-sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
+source /etc/os-release
+
+if [[ "$VERSION_ID" == "12" ]]; then
+    sed -i '/^Types:/s/deb/& deb-src/' /etc/apt/sources.list.d/debian.sources
+else
+    cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
+    sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
+fi

 apt-get update
 apt-get -yqq build-dep sqlite3 gettext git

@@ -11,33 +11,34 @@ apt-get -qq install --no-install-recommends -y \
     lbzip2 \
     procps vainfo \
     unzip locales tzdata libxml2 xz-utils \
-    python3.9 \
+    python3 \
     python3-pip \
     curl \
     lsof \
     jq \
-    nethogs
-
-# ensure python3 defaults to python3.9
-update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
+    nethogs \
+    libgl1 \
+    libglib2.0-0 \
+    libusb-1.0.0

 mkdir -p -m 600 /root/.gnupg

-# add coral repo
-curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
-    gpg --dearmor -o /etc/apt/trusted.gpg.d/google-cloud-packages-archive-keyring.gpg
-echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list
-echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections
+# install coral runtime
+wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb"
+unset DEBIAN_FRONTEND
+yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
+rm /tmp/libedgetpu1-max.deb

-# enable non-free repo in Debian
-if grep -q "Debian" /etc/issue; then
-    sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
+# install python3 & tflite runtime
+if [[ "${TARGETARCH}" == "amd64" ]]; then
+    pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl
+    pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl
 fi

-# coral drivers
-apt-get -qq update
-apt-get -qq install --no-install-recommends --no-install-suggests -y \
-    libedgetpu1-max python3-tflite-runtime python3-pycoral
+if [[ "${TARGETARCH}" == "arm64" ]]; then
+    pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl
+    pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl
+fi

 # btbn-ffmpeg -> amd64
 if [[ "${TARGETARCH}" == "amd64" ]]; then
@@ -65,23 +66,15 @@ fi

 # arch specific packages
 if [[ "${TARGETARCH}" == "amd64" ]]; then
-    # use debian bookworm for amd / intel-i965 driver packages
-    echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
-    apt-get -qq update
+    # install amd / intel-i965 driver packages
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
         i965-va-driver intel-gpu-tools onevpl-tools \
         libva-drm2 \
         mesa-va-drivers radeontop

-    # something about this dependency requires it to be installed in a separate call rather than in the line above
-    apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        i965-va-driver-shaders
-
     # intel packages use zst compression so we need to update dpkg
     apt-get install -y dpkg

-    rm -f /etc/apt/sources.list.d/debian-bookworm.list
-
     # use intel apt intel packages
     wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
     echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list

@@ -1,3 +1,4 @@
+aiofiles == 24.1.*
 click == 8.1.*
 # FastAPI
 aiohttp == 3.11.2
@@ -10,10 +11,10 @@ imutils == 0.5.*
 joserfc == 1.0.*
 pathvalidate == 3.2.*
 markupsafe == 2.1.*
+python-multipart == 0.0.12
+# General
 mypy == 1.6.1
-numpy == 1.26.*
-onvif_zeep == 0.2.12
-opencv-python-headless == 4.9.0.*
+onvif-zeep-async == 3.1.*
 paho-mqtt == 2.1.*
 pandas == 2.2.*
 peewee == 3.17.*
@@ -27,15 +28,19 @@ ruamel.yaml == 0.18.*
 tzlocal == 5.2
 requests == 2.32.*
 types-requests == 2.32.*
-scipy == 1.13.*
 norfair == 2.2.*
 setproctitle == 1.3.*
 ws4py == 0.5.*
 unidecode == 1.3.*
+# Image Manipulation
+numpy == 1.26.*
+opencv-python-headless == 4.10.0.*
+opencv-contrib-python == 4.9.0.*
+scipy == 1.14.*
 # OpenVino & ONNX
-openvino == 2024.3.*
-onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
-onnxruntime == 1.19.* ; platform_machine == 'aarch64'
+openvino == 2024.4.*
+onnxruntime-openvino == 1.20.* ; platform_machine == 'x86_64'
+onnxruntime == 1.20.* ; platform_machine == 'aarch64'
 # Embeddings
 transformers == 4.45.*
 # Generative AI
@@ -45,3 +50,7 @@ openai == 1.51.*
 # push notifications
 py-vapid == 1.9.*
 pywebpush == 2.0.*
+# alpr
+pyclipper == 1.3.*
+shapely == 2.0.*
+prometheus-client == 0.21.*

@@ -1,2 +1,2 @@
-scikit-build == 0.17.*
+scikit-build == 0.18.*
 nvidia-pyindex

@@ -42,8 +42,14 @@ function migrate_db_path() {
     fi
 }

+function set_libva_version() {
+    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+}
+
 echo "[INFO] Preparing Frigate..."
 migrate_db_path
+set_libva_version
 echo "[INFO] Starting Frigate..."

 cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"

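Note: a quick sketch of what set_libva_version() extracts, assuming a typical `ffmpeg -version` banner (the version numbers shown here are illustrative only):

    $ ffmpeg -version | grep libavformat
    libavformat    60.  3.100 / 60.  3.100
    $ ffmpeg -version | grep -Po "libavformat\W+\K\d+"
    60
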
@@ -43,6 +43,11 @@ function get_ip_and_port_from_supervisor() {
     export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
 }

+function set_libva_version() {
+    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+}
+
 if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Removing stale config from last run..."
     rm /dev/shm/go2rtc.yaml
@@ -61,6 +66,8 @@ else
     echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
 fi

+set_libva_version
+
 readonly config_path="/config"

 if [[ -x "${config_path}/go2rtc" ]]; then

--- /dev/null
+++ b/docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py
@@ -0,0 +1,45 @@
+import json
+import os
+import shutil
+import sys
+
+from ruamel.yaml import YAML
+
+sys.path.insert(0, "/opt/frigate")
+from frigate.const import (
+    DEFAULT_FFMPEG_VERSION,
+    INCLUDED_FFMPEG_VERSIONS,
+)
+
+sys.path.remove("/opt/frigate")
+
+yaml = YAML()
+
+config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
+
+# Check if we can use .yaml instead of .yml
+config_file_yaml = config_file.replace(".yml", ".yaml")
+if os.path.isfile(config_file_yaml):
+    config_file = config_file_yaml
+
+try:
+    with open(config_file) as f:
+        raw_config = f.read()
+
+    if config_file.endswith((".yaml", ".yml")):
+        config: dict[str, any] = yaml.load(raw_config)
+    elif config_file.endswith(".json"):
+        config: dict[str, any] = json.loads(raw_config)
+except FileNotFoundError:
+    config: dict[str, any] = {}
+
+path = config.get("ffmpeg", {}).get("path", "default")
+if path == "default":
+    if shutil.which("ffmpeg") is None:
+        print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
+    else:
+        print("ffmpeg")
+elif path in INCLUDED_FFMPEG_VERSIONS:
+    print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg")
+else:
+    print(f"{path}/bin/ffmpeg")

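Usage note: this script prints the ffmpeg binary Frigate should invoke, keyed off the optional `ffmpeg.path` entry of the config it parses above. A minimal sketch of that fragment, with an illustrative value (per the code, valid values are "default", one of the bundled INCLUDED_FFMPEG_VERSIONS, or a custom install prefix):

    ffmpeg:
      path: "7.0"
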
@@ -81,6 +81,9 @@ http {
     open_file_cache_errors on;
     aio on;

+    # file upload size
+    client_max_body_size 10M;
+
     # https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
     vod_open_file_thread_pool default;

--- /dev/null
+++ b/docker/rockchip/COCO/coco_subset_20.txt
@@ -0,0 +1,20 @@
+./subset/000000005001.jpg
+./subset/000000038829.jpg
+./subset/000000052891.jpg
+./subset/000000075612.jpg
+./subset/000000098261.jpg
+./subset/000000181542.jpg
+./subset/000000215245.jpg
+./subset/000000277005.jpg
+./subset/000000288685.jpg
+./subset/000000301421.jpg
+./subset/000000334371.jpg
+./subset/000000348481.jpg
+./subset/000000373353.jpg
+./subset/000000397681.jpg
+./subset/000000414673.jpg
+./subset/000000419312.jpg
+./subset/000000465822.jpg
+./subset/000000475732.jpg
+./subset/000000559707.jpg
+./subset/000000574315.jpg

Binary files added under docker/rockchip/COCO/subset/ (20 new JPEGs):
000000005001.jpg (207 KiB), 000000038829.jpg (209 KiB), 000000052891.jpg (150 KiB),
000000075612.jpg (102 KiB), 000000098261.jpg (14 KiB), 000000181542.jpg (201 KiB),
000000215245.jpg (233 KiB), 000000277005.jpg (242 KiB), 000000288685.jpg (230 KiB),
000000301421.jpg (80 KiB), 000000334371.jpg (136 KiB), 000000348481.jpg (113 KiB),
000000373353.jpg (281 KiB), 000000397681.jpg (272 KiB), 000000414673.jpg (152 KiB),
000000419312.jpg (166 KiB), 000000465822.jpg (109 KiB), 000000475732.jpg (103 KiB),
000000559707.jpg (203 KiB), 000000574315.jpg (110 KiB)

@@ -7,21 +7,26 @@ FROM wheels as rk-wheels
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
 RUN sed -i "/https:\/\//d" /requirements-wheels.txt
+RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
+RUN python3 -m pip config set global.break-system-packages true
 RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
+RUN rm -rf /rk-wheels/opencv_python-*

 FROM deps AS rk-frigate
 ARG TARGETARCH

 RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
-    pip3 install -U /deps/rk-wheels/*.whl
+    pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages

 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
+COPY docker/rockchip/COCO /COCO
+COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py

-ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
+ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/

 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
 ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

--- /dev/null
+++ b/docker/rockchip/conv2rknn.py
@@ -0,0 +1,82 @@
+import os
+
+import rknn
+import yaml
+from rknn.api import RKNN
+
+try:
+    with open(rknn.__path__[0] + "/VERSION") as file:
+        tk_version = file.read().strip()
+except FileNotFoundError:
+    pass
+
+try:
+    with open("/config/conv2rknn.yaml", "r") as config_file:
+        configuration = yaml.safe_load(config_file)
+except FileNotFoundError:
+    raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
+
+if configuration["config"] != None:
+    rknn_config = configuration["config"]
+else:
+    rknn_config = {}
+
+if not os.path.isdir("/config/model_cache/rknn_cache/onnx"):
+    raise Exception(
+        "Place the onnx models you want to convert to rknn format in /config/model_cache/rknn_cache/onnx"
+    )
+
+if "soc" not in configuration:
+    try:
+        with open("/proc/device-tree/compatible") as file:
+            soc = file.read().split(",")[-1].strip("\x00")
+    except FileNotFoundError:
+        raise Exception("Make sure to run docker in privileged mode.")
+
+    configuration["soc"] = [
+        soc,
+    ]
+
+if "quantization" not in configuration:
+    configuration["quantization"] = False
+
+if "output_name" not in configuration:
+    configuration["output_name"] = "{{input_basename}}"
+
+for input_filename in os.listdir("/config/model_cache/rknn_cache/onnx"):
+    for soc in configuration["soc"]:
+        quant = "i8" if configuration["quantization"] else "fp16"
+
+        input_path = "/config/model_cache/rknn_cache/onnx/" + input_filename
+        input_basename = input_filename[: input_filename.rfind(".")]
+
+        output_filename = (
+            configuration["output_name"].format(
+                quant=quant,
+                input_basename=input_basename,
+                soc=soc,
+                tk_version=tk_version,
+            )
+            + ".rknn"
+        )
+        output_path = "/config/model_cache/rknn_cache/" + output_filename
+
+        rknn_config["target_platform"] = soc
+
+        rknn = RKNN(verbose=True)
+        rknn.config(**rknn_config)
+
+        if rknn.load_onnx(model=input_path) != 0:
+            raise Exception("Error loading model.")
+
+        if (
+            rknn.build(
+                do_quantization=configuration["quantization"],
+                dataset="/COCO/coco_subset_20.txt",
+            )
+            != 0
+        ):
+            raise Exception("Error building model.")
+
+        if rknn.export_rknn(output_path) != 0:
+            raise Exception("Error exporting rknn model.")

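Usage sketch: the converter reads /config/conv2rknn.yaml, whose keys mirror exactly what the script checks above (soc, quantization, output_name, config), and takes its ONNX inputs from /config/model_cache/rknn_cache/onnx. The concrete values below are illustrative only:

    soc:
      - rk3588            # omit to auto-detect via /proc/device-tree/compatible
    quantization: false   # false -> fp16, true -> i8 (calibrated on /COCO/coco_subset_20.txt)
    output_name: "{input_basename}-{soc}-{quant}-{tk_version}"
    config: {}            # extra kwargs passed straight through to RKNN.config()

With models in place, running the script (it is copied to /opt/conv2rknn.py in the image) writes the converted .rknn files to /config/model_cache/rknn_cache/.
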
@@ -1 +1,2 @@
-rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp39-cp39-linux_aarch64.whl
+rknn-toolkit2 == 2.3.0
+rknn-toolkit-lite2 == 2.3.0

@@ -34,7 +34,7 @@ RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
 RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf

 #######################################################################
-FROM --platform=linux/amd64 debian:11 as debian-base
+FROM --platform=linux/amd64 debian:12 as debian-base

 RUN apt-get update && apt-get -y upgrade
 RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
@@ -51,7 +51,7 @@ COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
 RUN ln -s /opt/rocm-$ROCM /opt/rocm

 RUN apt-get -y install g++ cmake
-RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev
+RUN apt-get -y install python3-pybind11 python3-distutils python3-dev

 WORKDIR /opt/build

@@ -70,10 +70,11 @@ RUN apt-get -y install libnuma1
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /

-COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
-RUN python3 -m pip install --upgrade pip \
-    && pip3 uninstall -y onnxruntime-openvino \
-    && pip3 install -r /requirements.txt
+# Temporarily disabled to see if a new wheel can be built to support py3.11
+#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
+#RUN python3 -m pip install --upgrade pip \
+# && pip3 uninstall -y onnxruntime-openvino \
+# && pip3 install -r /requirements.txt

 #######################################################################
 FROM scratch AS rocm-dist
@@ -86,12 +87,12 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share
 COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
 COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
 COPY --from=rocm /opt/rocm-dist/ /
-COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
+COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/

 #######################################################################
 FROM deps-prelim AS rocm-prelim-hsa-override0
+
 ENV HSA_ENABLE_SDMA=0

 COPY --from=rocm-dist / /

@@ -12,7 +12,5 @@ RUN rm -rf /usr/lib/btbn-ffmpeg/
 RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \
     /deps/install_deps.sh
 
-ENV LIBAVFORMAT_VERSION_MAJOR=58
-
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
@@ -18,13 +18,14 @@ apt-get -qq install --no-install-recommends -y \
 mkdir -p -m 600 /root/.gnupg
 
 # enable non-free repo
-sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list
+echo "deb http://deb.debian.org/debian bookworm main contrib non-free non-free-firmware" | tee -a /etc/apt/sources.list
+apt update
 
 # ffmpeg -> arm64
 if [[ "${TARGETARCH}" == "arm64" ]]; then
     # add raspberry pi repo
     gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E
-    echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list
+    echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list
     apt-get -qq update
     apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
 fi
@@ -7,33 +7,19 @@ ARG DEBIAN_FRONTEND=noninteractive
 FROM wheels as trt-wheels
 ARG DEBIAN_FRONTEND
 ARG TARGETARCH
+RUN python3 -m pip config set global.break-system-packages true
 
 # Add TensorRT wheels to another folder
 COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
 RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
 
-# Build CuDNN
-FROM wget AS cudnn-deps
-
-ARG COMPUTE_LEVEL
-
-RUN apt-get update \
-    && apt-get install -y git build-essential
-
-RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
-    && dpkg -i cuda-keyring_1.1-1_all.deb \
-    && apt-get update \
-    && apt-get -y install cuda-toolkit \
-    && rm -rf /var/lib/apt/lists/*
-
 FROM tensorrt-base AS frigate-tensorrt
-ENV TRT_VER=8.5.3
+ENV TRT_VER=8.6.1
+RUN python3 -m pip config set global.break-system-packages true
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
-    pip3 install -U /deps/trt-wheels/*.whl && \
+    pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \
     ldconfig
-COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
 
-ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
 
@@ -42,8 +28,8 @@ FROM devcontainer AS devcontainer-trt
 
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
+COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
-    pip3 install -U /deps/trt-wheels/*.whl
+    pip3 install -U /deps/trt-wheels/*.whl --break-system-packages
@@ -41,11 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
     && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
 
 COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
-ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
+ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
 
 RUN pip3 uninstall -y onnxruntime-openvino \
     && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
-    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
+    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
 
 FROM build-wheels AS trt-model-wheels
 ARG DEBIAN_FRONTEND
@@ -3,7 +3,7 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive
 
-ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3
+ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
 
 # Build TensorRT-specific library
 FROM ${TRT_BASE} AS trt-deps
@@ -24,6 +24,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
 
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 ENV YOLO_MODELS=""
 
@@ -1,6 +1,8 @@
 /usr/local/lib
-/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
-/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
-/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
-/usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib
-/usr/local/lib/python3.9/dist-packages/tensorrt
+/usr/local/cuda/lib64
+/usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
+/usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
+/usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
+/usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib
+/usr/local/lib/python3.11/dist-packages/tensorrt
+/usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib
@@ -1,14 +1,14 @@
 # NVidia TensorRT Support (amd64 only)
 --extra-index-url 'https://pypi.nvidia.com'
 numpy < 1.24; platform_machine == 'x86_64'
-tensorrt == 8.5.3.*; platform_machine == 'x86_64'
-cuda-python == 11.8; platform_machine == 'x86_64'
-cython == 0.29.*; platform_machine == 'x86_64'
+tensorrt == 8.6.1.*; platform_machine == 'x86_64'
+cuda-python == 11.8.*; platform_machine == 'x86_64'
+cython == 3.0.*; platform_machine == 'x86_64'
 nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
 nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
 nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
-nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
+nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64'
 nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
@@ -4,7 +4,9 @@ title: Advanced Options
 sidebar_label: Advanced Options
 ---
 
-### `logger`
+### Logging
 
+#### Frigate `logger`
+
 Change the default log level for troubleshooting purposes.
 
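For reference, the `logger` section retitled above accepts a default level plus per-module overrides; a minimal sketch (the module name is illustrative):

```yaml
logger:
  # default log level for all of Frigate
  default: info
  logs:
    # raise verbosity for a single module while troubleshooting
    frigate.event: debug
```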
@@ -28,6 +30,18 @@ Examples of available modules are:
 - `watchdog.<camera_name>`
 - `ffmpeg.<camera_name>.<sorted_roles>` NOTE: All FFmpeg logs are sent as `error` level.
 
+#### Go2RTC Logging
+
+See [the go2rtc docs](https://github.com/AlexxIT/go2rtc#module-log) for logging configuration:
+
+```yaml
+go2rtc:
+  streams:
+    ...
+  log:
+    exec: trace
+```
+
 ### `environment_vars`
 
 This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS)
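A minimal sketch of that `environment_vars` section (the variable name and value are placeholders):

```yaml
environment_vars:
  EXAMPLE_VAR: value
```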
@@ -174,7 +188,7 @@ NOTE: The folder that is set for the config needs to be the folder that contains
 
 ### Custom go2rtc version
 
-Frigate currently includes go2rtc v1.9.4, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.9.2; there may be certain cases where you want to run a different version of go2rtc.
 
 To do this:
 
@@ -189,16 +203,16 @@ When frigate starts up, it checks whether your config file is valid, and if it i
 
 ### Via API
 
-Frigate can accept a new configuration file as JSON at the `/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.
+Frigate can accept a new configuration file as JSON at the `/api/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.
 
 ```bash
-curl -X POST http://frigate_host:5000/config/save -d @config.json
+curl -X POST http://frigate_host:5000/api/config/save -d @config.json
 ```
 
 if you'd like you can use your yaml config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to json:
 
 ```bash
-yq r -j config.yml | curl -X POST http://frigate_host:5000/config/save -d @-
+yq r -j config.yml | curl -X POST http://frigate_host:5000/api/config/save -d @-
 ```
 
 ### Via Command Line
@@ -24,6 +24,11 @@ On startup, an admin user and password are generated and printed in the logs. It
 
 In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup using the `reset_admin_password` setting in your config file.
 
+```yaml
+auth:
+  reset_admin_password: true
+```
+
 ## Login failure rate limiting
 
 In order to limit the risk of brute force attacks, rate limiting is available for login failures. This is implemented with SlowApi, and the string notation for valid values is available in [the documentation](https://limits.readthedocs.io/en/stable/quickstart.html#examples).
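A sketch of what that setting might look like, assuming the option is named `failed_login_rate_limit` and takes SlowApi's string notation (the values are illustrative):

```yaml
auth:
  # no more than 1 failure/second, 5/minute, 20/hour
  failed_login_rate_limit: "1/second;5/minute;20/hour"
```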
@@ -41,6 +41,7 @@ cameras:
     ...
     onvif:
       # Required: host of the camera being connected to.
+      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
      host: 0.0.0.0
       # Optional: ONVIF port for device (default: shown below).
       port: 8000
@@ -49,6 +50,8 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
+      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
+      tls_insecure: False
       # Optional: PTZ camera object autotracking. Keeps a moving object in
       # the center of the frame by automatically moving the PTZ camera.
       autotracking:
@@ -164,3 +167,7 @@ To maintain object tracking during PTZ moves, Frigate tracks the motion of your
 ### Calibration seems to have completed, but the camera is not actually moving to track my object. Why?
 
 Some cameras have firmware that reports that FOV RelativeMove, the ONVIF command that Frigate uses for autotracking, is supported. However, if the camera does not pan or tilt when an object comes into the required zone, your camera's firmware does not actually support FOV RelativeMove. One such camera is the Uniview IPC672LR-AX4DUPK. It actually moves its zoom motor instead of panning and tilting and does not follow the ONVIF standard whatsoever.
+
+### Frigate reports an error saying that calibration has failed. Why?
+
+Calibration measures the amount of time it takes for Frigate to make a series of movements with your PTZ. This error message is recorded in the log if these values are too high for Frigate to support calibrated autotracking. This is often the case when your camera's motor or network connection is too slow or your camera's firmware doesn't report the motor status in a timely manner. You can try running without calibration (just remove the `movement_weights` line from your config and restart), but if calibration fails, this often means that autotracking will behave unpredictably.
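The new FAQ entry above suggests removing `movement_weights` to run uncalibrated; a sketch of the relevant config, assuming the `calibrate_on_startup` option (camera name is a placeholder):

```yaml
cameras:
  ptz_camera:
    onvif:
      # ...host, port, user, password as above...
      autotracking:
        enabled: true
        calibrate_on_startup: true
        # movement_weights: ...  # remove this line to run without calibration
```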
@@ -65,19 +65,32 @@ ffmpeg:
 
 ## Model/vendor specific setup
 
+### Amcrest & Dahua
+
+Amcrest & Dahua cameras should be connected to via RTSP using the following format:
+
+```
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=0 # this is the main stream
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=1 # this is the sub stream, typically supporting low resolutions only
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=2 # higher end cameras support a third stream with a mid resolution (1280x720, 1920x1080)
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=3 # new higher end cameras support a fourth stream with another mid resolution (1280x720, 1920x1080)
+
+```
+
 ### Annke C800
 
-This camera is H.265 only. To be able to play clips on some devices (like MacOs or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately direct playback of in the browser is not working (yet), but the downloaded clip can be played locally.
+This camera is H.265 only. To be able to play clips on some devices (like MacOs or iPhone) the H.265 stream has to be adjusted using the `apple_compatibility` config.
 
 ```yaml
 cameras:
   annkec800: # <------ Name the camera
     ffmpeg:
+      apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
       output_args:
-        record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
+        record: preset-record-generic-audio-aac
 
       inputs:
-        - path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
+        - path: rtsp://USERNAME:PASSWORD@CAMERA-IP/H264/ch1/main/av_stream # <----- Update for your camera
           roles:
             - detect
             - record
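A minimal sketch wiring the Dahua-style main and sub streams above into a Frigate camera config (camera name, IP, and credentials are placeholders):

```yaml
cameras:
  driveway:
    ffmpeg:
      inputs:
        # full resolution main stream for recording
        - path: rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=0
          roles:
            - record
        # low resolution sub stream for detection
        - path: rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=1
          roles:
            - detect
```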
@@ -95,6 +108,29 @@ ffmpeg:
       input_args: preset-rtsp-blue-iris
 ```
 
+### Hikvision Cameras
+
+Hikvision cameras should be connected to via RTSP using the following format:
+
+```
+rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/101 # this is the main stream
+rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/102 # this is the sub stream, typically supporting low resolutions only
+rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/103 # higher end cameras support a third stream with a mid resolution (1280x720, 1920x1080)
+```
+
+:::note
+
+[Some users have reported](https://www.reddit.com/r/frigate_nvr/comments/1hg4ze7/hikvision_security_settings) that newer Hikvision cameras require adjustments to the security settings:
+
+```
+RTSP Authentication - digest/basic
+RTSP Digest Algorithm - MD5
+WEB Authentication - digest/basic
+WEB Digest Algorithm - MD5
+```
+
+:::
+
 ### Reolink Cameras
 
 Reolink has older cameras (ex: 410 & 520) as well as newer camera (ex: 520a & 511wa) which support different subsets of options. In both cases using the http stream is recommended.
@@ -156,7 +192,9 @@ cameras:
 
 #### Reolink Doorbell
 
-The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
+The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
+
+Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
 
 ```yaml
 go2rtc:
docs/docs/configuration/face_recognition.md (new file)
@@ -0,0 +1,59 @@
+---
+id: face_recognition
+title: Face Recognition
+---
+
+Face recognition allows people to be assigned names and when their face is recognized Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications.
+
+Frigate has support for FaceNet to create face embeddings, which runs locally. Embeddings are then saved to Frigate's database.
+
+## Minimum System Requirements
+
+Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all.
+
+## Configuration
+
+Face recognition is disabled by default and requires Semantic Search to be enabled; face recognition must be enabled in your config file before it can be used. Semantic Search and face recognition are global configuration settings.
+
+```yaml
+face_recognition:
+  enabled: true
+```
+
+## Dataset
+
+The number of images needed for a sufficient training set for face recognition varies depending on several factors:
+
+- Diversity of the dataset: A dataset with diverse images, including variations in lighting, pose, and facial expressions, will require fewer images per person than a less diverse dataset.
+- Desired accuracy: The higher the desired accuracy, the more images are typically needed.
+
+However, here are some general guidelines:
+
+- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
+- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
+- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.
+
+## Creating a Robust Training Set
+
+The accuracy of face recognition is heavily dependent on the quality of data given to it for training. It is recommended to build the face training library in phases.
+
+:::tip
+
+When choosing images to include in the face training set it is recommended to always follow these recommendations:
+- If it is difficult to make out details in a person's face it will not be helpful in training.
+- Avoid images with under/over-exposure.
+- Avoid blurry / pixelated images.
+- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the training.
+- Do not upload too many images at the same time; it is recommended to train 4-6 images for each person each day so it is easier to know if the previously added images helped or hurt performance.
+
+:::
+
+### Step 1 - Building a Strong Foundation
+
+When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-2 photos taken by a smartphone for each person. It is important that the person's face in the photo is straight-on and not turned, which will ensure a good starting point.
+
+Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step.
+
+### Step 2 - Expanding The Dataset
+
+Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
@@ -5,6 +5,8 @@ title: Generative AI
 
 Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
 
+Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
+
 :::info
 
 Semantic Search must be enabled to use Generative AI.
@@ -13,9 +15,9 @@ Semantic Search must be enabled to use Generative AI.
 
 ## Configuration
 
-Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 providers available to integrate with Frigate.
+Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
 
-If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
+To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
 
 ```yaml
 genai:
@@ -114,6 +116,12 @@ genai:
     model: gpt-4o
 ```
 
+:::note
+
+To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
+
+:::
+
 ## Azure OpenAI
 
 Microsoft offers several vision models through Azure OpenAI. A subscription is required.
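A sketch of how the two environment-variable mechanisms above might be wired in a docker-compose deployment; the endpoint URL and key are placeholders, and the `{FRIGATE_...}` config substitution is assumed:

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      # point the OpenAI provider at an OpenAI-compatible endpoint
      OPENAI_BASE_URL: "https://my-openai-compatible-host/v1"
      # referenced from the Frigate config as api_key: "{FRIGATE_OPENAI_API_KEY}"
      FRIGATE_OPENAI_API_KEY: "sk-..."
```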
@@ -175,6 +175,16 @@ For more information on the various values across different distributions, see h
 
 Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`
 
+#### Stats for SR-IOV devices
+
+When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
+
+```yaml
+telemetry:
+  stats:
+    sriov: True
+```
+
 ## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
 
 VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
@@ -203,14 +203,13 @@ detectors:
   ov:
     type: openvino
     device: AUTO
-    model:
-      path: /openvino-model/ssdlite_mobilenet_v2.xml
 
 model:
   width: 300
   height: 300
   input_tensor: nhwc
   input_pixel_format: bgr
+  path: /openvino-model/ssdlite_mobilenet_v2.xml
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 
 record:
docs/docs/configuration/license_plate_recognition.md (new file)
@@ -0,0 +1,45 @@
+---
+id: license_plate_recognition
+title: License Plate Recognition (LPR)
+---
+
+Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.
+
+Users running a Frigate+ model should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
+
+LPR is most effective when the vehicle's license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining its detection and keeping the most confident result. LPR will not run on stationary vehicles.
+
+## Minimum System Requirements
+
+License plate recognition works by running AI models locally on your system. The models are relatively lightweight and run on your CPU. At least 4GB of RAM is required.
+
+## Configuration
+
+License plate recognition is disabled by default. Enable it in your config file:
+
+```yaml
+lpr:
+  enabled: true
+```
+
+## Advanced Configuration
+
+Several options are available to fine-tune the LPR feature. For example, you can adjust the `min_area` setting, which defines the minimum size in pixels a license plate must be before LPR runs. The default is 500 pixels.
+
+Additionally, you can define `known_plates` as strings or regular expressions, allowing Frigate to label tracked vehicles with custom sub_labels when a recognized plate is detected. This information is then accessible in the UI, filters, and notifications.
+
+```yaml
+lpr:
+  enabled: true
+  min_area: 500
+  known_plates:
+    Wife's Car:
+      - "ABC-1234"
+      - "ABC-I234"
+    Johnny:
+      - "J*N-*234" # Using wildcards for H/M and 1/I
+    Sally:
+      - "[S5]LL-1234" # Matches SLL-1234 and 5LL-1234
+```
+
+In this example, "Wife's Car" will appear as the label for any vehicle matching the plate "ABC-1234." The model might occasionally interpret the digit 1 as a capital I (e.g., "ABC-I234"), so both variations are listed. Similarly, multiple possible variations are specified for Johnny and Sally.
@@ -3,9 +3,9 @@ id: live
 title: Live View
 ---
 
-Frigate intelligently displays your camera streams on the Live view dashboard. Your camera images update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any motion is detected, cameras seamlessly switch to a live stream.
+Frigate intelligently displays your camera streams on the Live view dashboard. By default, Frigate employs "smart streaming" where camera images update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any motion or active objects are detected, cameras seamlessly switch to a live stream.
 
-## Live View technologies
+### Live View technologies
 
 Frigate intelligently uses three different streaming technologies to display your camera streams on the dashboard and the single camera view, switching between available modes based on network bandwidth, player errors, or required features like two-way talk. The highest quality and fluency of the Live view requires the bundled `go2rtc` to be configured as shown in the [step by step guide](/guides/configuring_go2rtc).
@@ -23,13 +23,13 @@ If you are using go2rtc, you should adjust the following settings in your camera
 
 - Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_. as these non-standard codecs remove keyframes (see below).
 - Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
-- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes.
+- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well.
 
 The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.
 
 ### Audio Support
 
-MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
+MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
 
 ```yaml
 go2rtc:
@@ -51,19 +51,32 @@ go2rtc:
       - ffmpeg:rtsp://192.168.1.5:554/live0#video=copy
 ```
 
-### Setting Stream For Live UI
+### Setting Streams For Live UI
 
-There may be some cameras that you would prefer to use the sub stream for live view, but the main stream for recording. This can be done via `live -> stream_name`.
+You can configure Frigate to allow manual selection of the stream you want to view in the Live UI. For example, you may want to view your camera's substream on mobile devices, but the full resolution stream on desktop devices. Setting the `live -> streams` list will populate a dropdown in the UI's Live view that allows you to choose between the streams. This stream setting is _per device_ and is saved in your browser's local storage.
 
+Additionally, when creating and editing camera groups in the UI, you can choose the stream you want to use for your camera group's Live dashboard.
+
+:::note
+
+Frigate's default dashboard ("All Cameras") will always use the first entry you've defined in `streams:` when playing live streams from your cameras.
+
+:::
+
+Configure the `streams` option with a "friendly name" for your stream followed by the go2rtc stream name.
+
+Using Frigate's internal version of go2rtc is required to use this feature. You cannot specify paths in the `streams` configuration, only go2rtc stream names.
+
 ```yaml
 go2rtc:
   streams:
     test_cam:
-      - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio.
+      - rtsp://192.168.1.5:554/live_main # <- stream which supports video & aac audio.
       - "ffmpeg:test_cam#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc
     test_cam_sub:
-      - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio.
-      - "ffmpeg:test_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc
+      - rtsp://192.168.1.5:554/live_sub # <- stream which supports video & aac audio.
+    test_cam_another_sub:
+      - rtsp://192.168.1.5:554/live_alt # <- stream which supports video & aac audio.
 
 cameras:
   test_cam:
@@ -80,7 +93,10 @@ cameras:
           roles:
             - detect
     live:
-      stream_name: test_cam_sub
+      streams: # <--- Multiple streams for Frigate 0.16 and later
+        Main Stream: test_cam # <--- Specify a "friendly name" followed by the go2rtc stream name
+        Sub Stream: test_cam_sub
+        Special Stream: test_cam_another_sub
 ```
 
 ### WebRTC extra configuration:
@@ -101,6 +117,7 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
 ```
 
 - For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.64.0.0/10` CIDR block.
+- Note that WebRTC does not support H.265.
 
 :::tip
 
@@ -138,3 +155,60 @@ services:
 :::
 
 See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
+
+### Two way talk
+
+For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
+
+- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
+- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
+- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
+
+To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
+
+### Streaming options on camera group dashboards
+
+Frigate provides a dialog in the Camera Group Edit pane with several options for streaming on a camera group's dashboard. These settings are _per device_ and are saved in your device's local storage.
+
+- Stream selection using the `live -> streams` configuration option (see _Setting Streams For Live UI_ above)
+- Streaming type:
+  - _No streaming_: Camera images will only update once per minute and no live streaming will occur.
+  - _Smart Streaming_ (default, recommended setting): Smart streaming will update your camera image once per minute when no detectable activity is occurring to conserve bandwidth and resources, since a static picture is the same as a streaming image with no motion or objects. When motion or objects are detected, the image seamlessly switches to a live stream.
+  - _Continuous Streaming_: Camera image will always be a live stream when visible on the dashboard, even if no activity is being detected. Continuous streaming may cause high bandwidth usage and performance issues. **Use with caution.**
+- _Compatibility mode_: Enable this option only if your camera's live stream is displaying color artifacts and has a diagonal line on the right side of the image. Before enabling this, try setting your camera's `detect` width and height to a standard aspect ratio (for example: 640x352 becomes 640x360, and 800x443 becomes 800x450, 2688x1520 becomes 2688x1512, etc). Depending on your browser and device, more than a few cameras in compatibility mode may not be supported, so only use this option if changing your config fails to resolve the color artifacts and diagonal line.
+
+:::note
+
+The default dashboard ("All Cameras") will always use Smart Streaming and the first entry set in your `streams` configuration, if defined. Use a camera group if you want to change any of these settings from the defaults.
+
+:::
+
+## Live view FAQ
+
+1. Why don't I have audio in my Live view?
+
+   You must use go2rtc to hear audio in your live streams. If you have go2rtc already configured, you need to ensure your camera is sending PCMA/PCMU or AAC audio. If you can't change your camera's audio codec, you need to [transcode the audio](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg) using go2rtc.
+
+   Note that the low bandwidth mode player is a video-only stream. You should not expect to hear audio when in low bandwidth mode, even if you've set up go2rtc.
+
+2. Frigate shows that my live stream is in "low bandwidth mode". What does this mean?
+
+   Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible.
+
+   When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream.
+
+   If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available.
+
+3. It doesn't seem like my cameras are streaming on the Live dashboard. Why?
+
+   On the default Live dashboard ("All Cameras"), your camera images will update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any activity is detected, cameras seamlessly switch to a full-resolution live stream. If you want to customize this behavior, use a camera group.
+
+4. I see a strange diagonal line on my live view, but my recordings look fine. How can I fix it?
+
+   This is caused by incorrect dimensions set in your detect width or height (or incorrectly auto-detected), causing the jsmpeg player's rendering engine to display a slightly distorted image. You should enlarge the width and height of your `detect` resolution up to a standard aspect ratio (example: 640x352 becomes 640x360, and 800x443 becomes 800x450, 2688x1520 becomes 2688x1512, etc). If changing the resolution to match a standard (4:3, 16:9, or 32:9, etc) aspect ratio does not solve the issue, you can enable "compatibility mode" in your camera group dashboard's stream settings. Depending on your browser and device, more than a few cameras in compatibility mode may not be supported, so only use this option if changing your `detect` width and height fails to resolve the color artifacts and diagonal line.
+
+5. How does "smart streaming" work?
+
+   Because a static image of a scene looks exactly the same as a live stream with no motion or activity, smart streaming updates your camera images once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any activity (motion or object/audio detection) occurs, cameras seamlessly switch to a live stream.
+
+   This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats.
+
+   This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras.
+
+6. I have unmuted some cameras on my dashboard, but I do not hear sound. Why?
+
+   If your camera is streaming (as indicated by a red dot in the upper right, or if it has been set to continuous streaming mode), your browser may be blocking audio until you interact with the page. This is an intentional browser limitation. See [this article](https://developer.mozilla.org/en-US/docs/Web/Media/Autoplay_guide#autoplay_availability). Many browsers have a whitelist feature to change this behavior.
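FAQ item 4 above suggests enlarging the `detect` dimensions to a standard aspect ratio; a minimal sketch of that change (camera name is a placeholder):

```yaml
cameras:
  front_door:
    detect:
      # 640x352 rounded up to a standard 16:9 resolution
      width: 640
      height: 360
```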
docs/docs/configuration/metrics.md (new file)
@@ -0,0 +1,99 @@
+---
+id: metrics
+title: Metrics
+---
+
+# Metrics
+
+Frigate exposes Prometheus metrics at the `/api/metrics` endpoint that can be used to monitor the performance and health of your Frigate instance.
+
+## Available Metrics
+
+### System Metrics
+- `frigate_cpu_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process CPU usage percentage
+- `frigate_mem_usage_percent{pid="", name="", process="", type="", cmdline=""}` - Process memory usage percentage
+- `frigate_gpu_usage_percent{gpu_name=""}` - GPU utilization percentage
+- `frigate_gpu_mem_usage_percent{gpu_name=""}` - GPU memory usage percentage
+
+### Camera Metrics
+- `frigate_camera_fps{camera_name=""}` - Frames per second being consumed from your camera
+- `frigate_detection_fps{camera_name=""}` - Number of times detection is run per second
+- `frigate_process_fps{camera_name=""}` - Frames per second being processed
+- `frigate_skipped_fps{camera_name=""}` - Frames per second skipped for processing
+- `frigate_detection_enabled{camera_name=""}` - Detection enabled status for camera
+- `frigate_audio_dBFS{camera_name=""}` - Audio dBFS for camera
+- `frigate_audio_rms{camera_name=""}` - Audio RMS for camera
+
+### Detector Metrics
+- `frigate_detector_inference_speed_seconds{name=""}` - Time spent running object detection in seconds
+- `frigate_detection_start{name=""}` - Detector start time (unix timestamp)
+
+### Storage Metrics
+- `frigate_storage_free_bytes{storage=""}` - Storage free bytes
+- `frigate_storage_total_bytes{storage=""}` - Storage total bytes
+- `frigate_storage_used_bytes{storage=""}` - Storage used bytes
+- `frigate_storage_mount_type{mount_type="", storage=""}` - Storage mount type info
+
+### Service Metrics
+- `frigate_service_uptime_seconds` - Uptime in seconds
+- `frigate_service_last_updated_timestamp` - Stats recorded time (unix timestamp)
+- `frigate_device_temperature{device=""}` - Device Temperature
+
+### Event Metrics
+- `frigate_camera_events{camera="", label=""}` - Count of camera events since exporter started
+
+## Configuring Prometheus
+
+To scrape metrics from Frigate, add the following to your Prometheus configuration:
+
+```yaml
+scrape_configs:
+  - job_name: 'frigate'
+    metrics_path: '/api/metrics'
+    static_configs:
+      - targets: ['frigate:5000']
+    scrape_interval: 15s
+```
+
+## Example Queries
+
+Here are some example PromQL queries that might be useful:
+
+```promql
+# Average CPU usage across all processes
+avg(frigate_cpu_usage_percent)
+
+# Total GPU memory usage
+sum(frigate_gpu_mem_usage_percent)
+
+# Detection FPS by camera
+rate(frigate_detection_fps{camera_name="front_door"}[5m])
+
+# Storage usage percentage
+(frigate_storage_used_bytes / frigate_storage_total_bytes) * 100
+
+# Event count by camera in last hour
+increase(frigate_camera_events[1h])
+```
+
+## Grafana Dashboard
+
+You can use these metrics to create Grafana dashboards to monitor your Frigate instance. Here's an example of metrics you might want to track:
+
+- CPU, Memory and GPU usage over time
+- Camera FPS and detection rates
+- Storage usage and trends
+- Event counts by camera
+- System temperatures
+
+A sample Grafana dashboard JSON will be provided in a future update.
+
+## Metric Types
+
+The metrics exposed by Frigate use the following Prometheus metric types:
+
+- **Counter**: Cumulative values that only increase (e.g., `frigate_camera_events`)
+- **Gauge**: Values that can go up and down (e.g., `frigate_cpu_usage_percent`)
+- **Info**: Key-value pairs for metadata (e.g., `frigate_storage_mount_type`)
+
+For more information about Prometheus metric types, see the [Prometheus documentation](https://prometheus.io/docs/concepts/metric_types/).
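As a usage sketch, these metrics can also drive Prometheus alerting; a hypothetical rule that fires when a camera stops delivering frames (rule name and threshold are illustrative):

```yaml
groups:
  - name: frigate
    rules:
      - alert: FrigateCameraStalled
        # no frames consumed from the camera for five minutes
        expr: frigate_camera_fps == 0
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Frigate camera {{ $labels.camera_name }} is not delivering frames"
```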
@@ -33,6 +33,14 @@ Frigate supports multiple different detectors that work on different types of ha
 
 :::
 
+:::note
+
+Multiple detectors can not be mixed for object detection (ex: OpenVINO and Coral EdgeTPU can not be used for object detection at the same time).
+
+This does not affect using hardware for accelerating other tasks such as [semantic search](./semantic_search.md)
+
+:::
+
 # Officially Supported Detectors
 
 Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
@ -116,6 +124,30 @@ detectors:
    device: pci
```

## Hailo-8l

This detector is available for use with the Hailo-8 AI Acceleration Module.

See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.

### Configuration

```yaml
detectors:
  hailo8l:
    type: hailo8l
    device: PCIe

model:
  width: 300
  height: 300
  input_tensor: nhwc
  input_pixel_format: bgr
  model_type: ssd
  path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```

## OpenVINO Detector

The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
@ -144,7 +176,9 @@ detectors:

#### SSDLite MobileNet v2

An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.

Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:

```yaml
detectors:
@ -254,6 +288,7 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@ -282,6 +317,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type

The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.

Use the config below to work with generated TRT models:

```yaml
detectors:
  tensorrt:
@ -501,11 +538,12 @@ detectors:
  cpu1:
    type: cpu
    num_threads: 3
  cpu2:
    type: cpu
    num_threads: 3

model:
  path: "/custom_model.tflite"
```

When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
@ -544,7 +582,7 @@ Hardware accelerated object detection is supported on the following SoCs:
- RK3576
- RK3588

This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as an object detection model.

### Prerequisites
@ -619,26 +657,36 @@ $ cat /sys/kernel/debug/rknpu/load
- All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires an x86 machine). Note that there is only post-processing for the supported models.

### Converting your own onnx model to rknn format

To convert an onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:

- Place one or more models in onnx format in the directory `config/model_cache/rknn_cache/onnx` on your docker host (this might require `sudo` privileges).
- Save the configuration file under `config/conv2rknn.yaml` (see below for details).
- Run `docker exec <frigate_container_id> python3 /opt/conv2rknn.py`. If the conversion was successful, the rknn models will be placed in `config/model_cache/rknn_cache`.

This is an example configuration file that you need to adjust to your specific onnx model:

```yaml
soc: ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
quantization: false

output_name: "{input_basename}"

config:
  mean_values: [[0, 0, 0]]
  std_values: [[255, 255, 255]]
  quant_img_rgb2bgr: true
```

Explanation of the parameters:

- `soc`: A list of all SoCs you want to build the rknn model for. If you don't specify this parameter, the script tries to find out your SoC and builds the rknn model for this one.
- `quantization`: true: 8 bit integer (i8) quantization, false: 16 bit float (fp16). Default: false.
- `output_name`: The output name of the model. The following variables are available:
  - `quant`: "i8" or "fp16" depending on the config
  - `input_basename`: the basename of the input model (e.g. "my_model" if the input model is called "my_model.onnx")
  - `soc`: the SoC this model was built for (e.g. "rk3588")
  - `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
  - **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).
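Once converted, the resulting `.rknn` file is referenced like any other custom model. A sketch, assuming the example `output_name` above produced `frigate-i8-my_model-rk3588-v2.3.0.rknn` and that the model takes 320x320 input (the filename, core count, and dimensions are placeholders for your own model and SoC):

```yaml
detectors:
  rknn:
    type: rknn
    # Number of NPU cores to use; the rk3588 has 3 (adjust for your SoC)
    num_cores: 3

model:
  # Converted model from the steps above, written to the rknn_cache folder
  path: /config/model_cache/rknn_cache/frigate-i8-my_model-rk3588-v2.3.0.rknn
  width: 320
  height: 320
```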
@ -34,7 +34,7 @@ False positives can also be reduced by filtering a detection based on its shape.

### Object Area

`min_area` and `max_area` filter on the area of an object's bounding box and can be used to reduce false positives that are outside the range of expected sizes. For example, when a leaf is detected as a dog or when a large tree is detected as a person, these can be reduced by adding a `min_area` / `max_area` filter. These values can either be in pixels or as a percentage of the frame (for example, 0.12 represents 12% of the frame).
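A minimal sketch mixing both forms (the object labels and values are illustrative):

```yaml
objects:
  filters:
    dog:
      # Pixels: ignore detections with a bounding box smaller than 50x50
      min_area: 2500
    person:
      # Percentage: ignore detections covering more than 12% of the frame
      max_area: 0.12
```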
### Object Proportions
@ -46,13 +46,18 @@ mqtt:
  tls_insecure: false
  # Optional: interval in seconds for publishing stats (default: shown below)
  stats_interval: 60
  # Optional: QoS level for subscriptions and publishing (default: shown below)
  # 0 = at most once
  # 1 = at least once
  # 2 = exactly once
  qos: 0

# Optional: Detectors configuration. Defaults to a single CPU detector
detectors:
  # Required: name of the detector
  detector_name:
    # Required: type of the detector
    # Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
    # Additional detector types can also be plugged in.
    # Detectors may require additional configuration.
    # Refer to the Detectors configuration page for more information.
@ -117,25 +122,27 @@ auth:
  hash_iterations: 600000

# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
  # Required: path to the model (default: automatic based on detector)
  path: /edgetpu_model.tflite
  # Required: path to the labelmap (default: shown below)
  labelmap_path: /labelmap.txt
  # Required: Object detection model input width (default: shown below)
  width: 320
  # Required: Object detection model input height (default: shown below)
  height: 320
  # Required: Object detection model input colorspace
  # Valid values are rgb, bgr, or yuv. (default: shown below)
  input_pixel_format: rgb
  # Required: Object detection model input tensor format
  # Valid values are nhwc or nchw (default: shown below)
  input_tensor: nhwc
  # Required: Object detection model type, currently only used with the OpenVINO detector
  # Valid values are ssd, yolox, yolonas (default: shown below)
  model_type: ssd
  # Required: Label name modifications. These are merged into the standard labelmap.
  labelmap:
    2: vehicle
  # Optional: Map of object labels to their attribute labels (default: depends on model)
@ -242,6 +249,8 @@ ffmpeg:
  # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
  # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
  retry_interval: 10
  # Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
  apple_compatibility: false

# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
@ -308,9 +317,11 @@ objects:
  # Optional: filters to reduce false positives for specific object types
  filters:
    person:
      # Optional: minimum size of the bounding box for the detected object (default: 0).
      # Can be specified as an integer for width*height in pixels or as a decimal representing the percentage of the frame (0.000001 to 0.99).
      min_area: 5000
      # Optional: maximum size of the bounding box for the detected object (default: 24000000).
      # Can be specified as an integer for width*height in pixels or as a decimal representing the percentage of the frame (0.000001 to 0.99).
      max_area: 100000
      # Optional: minimum width/height of the bounding box for the detected object (default: 0)
      min_ratio: 0.5
@ -522,6 +533,14 @@ semantic_search:
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"

# Optional: Configuration for face recognition capability
face_recognition:
  # Optional: Enable face recognition (default: shown below)
  enabled: False
  # Optional: Set the model size used for embeddings. (default: shown below)
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"

# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
@ -546,15 +565,19 @@ genai:

# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:

# Optional: Live stream configuration for WebUI.
# NOTE: Can be overridden at the camera level
live:
  # Optional: Set the streams configured in go2rtc
  # that should be used for live view in frigate WebUI. (default: name of camera)
  # NOTE: In most cases this should be set at the camera level only.
  streams:
    main_stream: main_stream_name
    sub_stream: sub_stream_name
  # Optional: Set the height of the jsmpeg stream. (default: 720)
  # This must be less than or equal to the height of the detect stream. Lower resolutions
  # reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio.
@ -689,6 +712,7 @@ cameras:
    # to enable PTZ controls.
    onvif:
      # Required: host of the camera being connected to.
      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
      host: 0.0.0.0
      # Optional: ONVIF port for device (default: shown below).
      port: 8000

@ -697,6 +721,8 @@ cameras:
      user: admin
      # Optional: password for login.
      password: admin
      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
      tls_insecure: False
      # Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
      # Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
      ignore_time_mismatch: False
@ -760,6 +786,8 @@ cameras:
        - cat
      # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
      required_zones: []
      # Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
      debug_save_thumbnails: False

# Optional
ui:
@ -804,11 +832,13 @@ telemetry:
    - lo
  # Optional: Configure system stats
  stats:
    # Optional: Enable AMD GPU stats (default: shown below)
    amd_gpu_stats: True
    # Optional: Enable Intel GPU stats (default: shown below)
    intel_gpu_stats: True
    # Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
    sriov: False
    # Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
    # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
    network_bandwidth: False
  # Optional: Enable the latest version outbound check (default: shown below)
@ -1,11 +1,11 @@
---
id: semantic_search
title: Semantic Search
---

Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally.

Semantic Search is accessed via the _Explore_ view in the Frigate UI.
@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.

## Configuration

Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting.

```yaml
semantic_search:
@ -29,9 +29,9 @@ semantic_search:

:::tip

The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again.

If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.

:::
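A sketch of a config that triggers a one-time reindex (remember to set it back to `False` before the next restart, as noted above):

```yaml
semantic_search:
  enabled: True
  # One-time reindex of existing tracked objects; disable after it completes
  reindex: True
```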
@ -39,9 +39,9 @@ If you are enabling the Search feature for the first time, be advised that Friga

The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`:

```yaml
semantic_search:
@ -50,7 +50,7 @@ semantic_search:
```

- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
### GPU Acceleration
@ -84,7 +84,7 @@ If the correct build is used for your GPU and the `large` model is configured, t

## Usage and Best Practices

1. Semantic Search is used in conjunction with the other filters available on the Explore page. Use a combination of traditional filtering and Semantic Search for the best results.
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:

- Encode Mode: H.264
- Resolution: 2688\*1520
- Frame Rate(FPS): 15
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info)

**Sub Stream (Detection)**
@ -13,20 +13,19 @@ Many users have reported various issues with Reolink cameras, so I do not recomm

Here are some of the cameras I recommend:

- <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
- <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
- <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)

I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.

## Server

My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with an M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.

| Name                                                                                                          | Coral Inference Speed | Coral Compatibility | Notes                                                                                      |
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ------------------------------------------------------------------------------------------ |
| Beelink EQ13 (<a href="https://amzn.to/4iQaBKu" target="_blank" rel="nofollow noopener sponsored">Amazon</a>)  | 5-10ms                | USB                 | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras.  |

## Detectors
@ -52,24 +51,25 @@ The OpenVINO detector type is able to run on:

More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)

Inference speeds vary greatly depending on the CPU or GPU used; some known examples of GPU inference times are below:

| Name                 | MobileNetV2 Inference Time | YOLO-NAS Inference Time   | Notes                                  |
| -------------------- | -------------------------- | ------------------------- | -------------------------------------- |
| Intel Celeron J4105  | ~ 25 ms                    |                           | Can only run one detector instance     |
| Intel Celeron N3060  | 130 - 150 ms               |                           | Can only run one detector instance     |
| Intel Celeron N3205U | ~ 120 ms                   |                           | Can only run one detector instance     |
| Intel Celeron N4020  | 50 - 200 ms                |                           | Inference speed depends on other loads |
| Intel i3 6100T       | 15 - 35 ms                 |                           | Can only run one detector instance     |
| Intel i3 8100        | ~ 15 ms                    |                           |                                        |
| Intel i5 4590        | ~ 20 ms                    |                           |                                        |
| Intel i5 6500        | ~ 15 ms                    |                           |                                        |
| Intel i5 7200u       | 15 - 25 ms                 |                           |                                        |
| Intel i5 7500        | ~ 15 ms                    |                           |                                        |
| Intel i5 1135G7      | 10 - 15 ms                 |                           |                                        |
| Intel i3 12000       |                            | 320: ~ 19 ms 640: ~ 54 ms |                                        |
| Intel i5 12600K      | ~ 15 ms                    | 320: ~ 20 ms 640: ~ 46 ms |                                        |
| Intel Arc A380       | ~ 6 ms                     | 320: ~ 10 ms              |                                        |
| Intel Arc A750       | ~ 4 ms                     | 320: ~ 8 ms               |                                        |

### TensorRT - Nvidia GPU
@ -78,29 +78,35 @@ The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which

Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model; some known examples are below:

| Name            | YoloV7 Inference Time | YOLO-NAS Inference Time   |
| --------------- | --------------------- | ------------------------- |
| GTX 1060 6GB    | ~ 7 ms                |                           |
| GTX 1070        | ~ 6 ms                |                           |
| GTX 1660 SUPER  | ~ 4 ms                |                           |
| RTX 3050        | 5 - 7 ms              | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 Mobile | ~ 5 ms                |                           |
| Quadro P400 2GB | 20 - 25 ms            |                           |
| Quadro P2000    | ~ 12 ms               |                           |
### AMD GPUs

With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.

### Hailo-8l PCIe

Frigate supports the Hailo-8l M.2 card on any hardware, but currently it is only tested on the Raspberry Pi5 PCIe hat from the AI kit.

The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.

## Community Supported Detectors

### Nvidia Jetson

Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).

Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.

### Rockchip platform

Frigate supports hardware video processing on all Rockchip boards. However, hardware object detection is only supported on these boards:
@ -112,12 +118,6 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard

The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.

## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)

This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
@ -111,7 +111,7 @@ For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simpl

For other installations, follow these steps for installation:

1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
4. Run the script with `./user_installation.sh`
@ -305,8 +305,15 @@ To install make sure you have the [community app plugin here](https://forums.unr

## Proxmox

[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.

:::warning

If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation; Frigate does not officially support running inside of an LXC.

:::

Suggestions include:

- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
  - `lxc.cgroup2.devices.allow: c 226:128 rwm`
  - `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`
@ -7,7 +7,7 @@ title: Configuring go2rtc

Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:

- WebRTC or MSE for live viewing with audio, higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream and does not support audio
- Live stream support for cameras in Home Assistant Integration
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
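As a minimal sketch of what opting in looks like (the stream name, credentials, and RTSP URL are placeholders for your own camera), a go2rtc stream is declared at the top level of the Frigate config:

```yaml
go2rtc:
  streams:
    # Placeholder stream name; point this at your camera's
    # high-resolution stream
    back_yard:
      - rtsp://user:password@192.168.1.10:554/stream1
```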
@ -47,7 +47,7 @@ that card.

## Configuration

When configuring the integration, you will be asked for the `URL` of your Frigate instance, which can be pointed at the internal unauthenticated port (`5000`) or the authenticated port (`8971`) for your instance. This may look like `http://<host>:5000/`.

### Docker Compose Examples

@ -55,7 +55,7 @@ If you are running Home Assistant Core and Frigate with Docker Compose on the sa

#### Home Assistant running with host networking

It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` or `http://172.17.0.1:8971` when configuring the integration.

```yaml
services:

@ -75,7 +75,7 @@ services:

#### Home Assistant _not_ running with host networking or in a separate compose file

In this example, it is recommended to connect to the authenticated port, for example, `http://frigate:8971` when configuring the integration. There is no need to map the port for the Frigate container.

```yaml
services:
@ -98,19 +98,20 @@ services:

If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network.

| Addon Version                  | URL                                       |
| ------------------------------ | ----------------------------------------- |
| Frigate NVR                    | `http://ccab4aaf-frigate:5000`            |
| Frigate NVR (Full Access)      | `http://ccab4aaf-frigate-fa:5000`         |
| Frigate NVR Beta               | `http://ccab4aaf-frigate-beta:5000`       |
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000`    |
| Frigate NVR HailoRT Beta       | `http://ccab4aaf-frigate-hailo-beta:5000` |

### Frigate running on a separate machine

If you run Frigate on a separate device within your local network, Home Assistant will need access to port 8971.

#### Local network

Use `http://<frigate_device_ip>:8971` as the URL for the integration so that authentication is required.

```yaml
services:

@ -118,7 +119,7 @@ services:
    image: ghcr.io/blakeblackshear/frigate:stable
    ...
    ports:
      - "8971:8971"
    ...
```
@ -195,12 +196,30 @@ To load a snapshot for a tracked object:

```
https://HA_URL/api/frigate/notifications/<event-id>/snapshot.jpg
```

To load a video clip of a tracked object using an Android device:

```
https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
```

To load a video clip of a tracked object using an iOS device:

```
https://HA_URL/api/frigate/notifications/<event-id>/master.m3u8
```

To load a preview gif of a tracked object:

```
https://HA_URL/api/frigate/notifications/<event-id>/event_preview.gif
```

To load a preview gif of a review item:

```
https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
```
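These URLs are typically consumed from a Home Assistant notification automation. A minimal sketch (the MQTT trigger, label check, and notify service are assumptions for your own setup):

```yaml
automation:
  - alias: "Notify on Frigate person detection"
    trigger:
      - platform: mqtt
        topic: frigate/events
    condition:
      - condition: template
        value_template: "{{ trigger.payload_json['after']['label'] == 'person' }}"
    action:
      - service: notify.mobile_app_my_phone # hypothetical notify target
        data:
          message: "Person detected on {{ trigger.payload_json['after']['camera'] }}"
          data:
            # Attach the snapshot using the notification proxy URL format above
            image: "/api/frigate/notifications/{{ trigger.payload_json['after']['id'] }}/snapshot.jpg"
```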
<a name="streams"></a>

## RTSP stream
@ -282,3 +301,7 @@ which server they are referring to.
|
|||||||
 #### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit?
 
 The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera.
 
+#### I have set up automations based on the occupancy sensors. Sometimes the automation runs because the sensors are turned on, but when I look at Frigate I can't find the object that triggered the sensor. Is this a bug?
+
+No. The occupancy sensors have fewer checks in place because they are often used for things like turning the lights on, where latency needs to be as low as possible, so false positives can sometimes trigger these sensors. If you want false positive filtering, you should use an MQTT sensor on the `frigate/events` or `frigate/reviews` topic, as sketched below.
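A minimal sketch of that kind of MQTT-side filter, using paho-mqtt and assuming a broker on localhost; the label and score threshold are illustrative:

```python
import json

import paho.mqtt.client as mqtt

MIN_SCORE = 0.75  # illustrative threshold; tune for your cameras


def on_message(client, userdata, message):
    payload = json.loads(message.payload)
    after = payload.get("after", {})
    # Only react to high-confidence person detections, dropping the
    # low-confidence hits that can flip the occupancy sensors.
    if after.get("label") == "person" and after.get("top_score", 0) >= MIN_SCORE:
        print(f"confident person detection on {after.get('camera')}")


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)  # assumption: broker address/port
client.subscribe("frigate/events")
client.loop_forever()
```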
@@ -29,7 +29,9 @@ You cannot use the `environment_vars` section of your Frigate configuration file
 
 ## Submit examples
 
-Once your API key is configured, you can submit examples directly from the Explore page in Frigate using the `Frigate+` button.
+Once your API key is configured, you can submit examples directly from the Explore page in Frigate. From the More Filters menu, select "Has a Snapshot - Yes" and "Submitted to Frigate+ - No", and press Apply at the bottom of the pane. Then, click on a thumbnail and select the Snapshot tab.
+
+You can use your keyboard's left and right arrow keys to quickly navigate between the tracked object snapshots.
 
 :::note
 
@@ -37,8 +39,6 @@ Snapshots must be enabled to be able to submit examples to Frigate+
 
 :::
 
-
-
 
 
 ### Annotate and verify
@@ -19,6 +19,10 @@ Please use your own knowledge to assess and vet them before you install anything
 It supports automatically setting the sub labels in Frigate for person objects that are detected and recognized.
 This is a fork (with fixed errors and new features) of the [original Double Take](https://github.com/jakowenko/double-take) project, which, unfortunately, is no longer maintained by its author.
 
+## [Frigate Notify](https://github.com/0x2142/frigate-notify)
+
+[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. It is intended to be used with standalone Frigate installations; Home Assistant is not required, and MQTT is optional but recommended.
+
 ## [Frigate telegram](https://github.com/OldTyT/frigate-telegram)
 
 [Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.
@@ -5,7 +5,7 @@ title: Requesting your first model
 
 ## Step 1: Upload and annotate your images
 
-Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
+Before requesting your first model, you will need to upload and verify at least 10 images to Frigate+. The more images you upload, annotate, and verify, the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
 
 It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
 
@@ -13,7 +13,7 @@ You may find that Frigate+ models result in more false positives initially, but
 
 For the best results, follow these guidelines.
 
-**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car, for example, the model will be taught that part of the image is _not_ a car and it will start to get confused.
+**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car, for example, the model will be taught that part of the image is _not_ a car and it will start to get confused. You can exclude labels that you don't want detected on any of your cameras.
 
 **Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
 
@@ -21,7 +21,7 @@ For the best results, follow these guidelines.
 
 **Label objects hard to identify as difficult**: When objects are truly difficult to make out, such as a car barely visible through a bush, or a dog that is hard to distinguish from the background at night, flag it as 'difficult'. This is not used in the model training as of now, but will be in the future.
 
-**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
+**Delivery logos such as `amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
 
 ![difficult example](/img/plus/difficult-example.jpg)
 
@@ -17,7 +17,7 @@ Information on how to integrate Frigate+ with Frigate can be found in the [integ
 
 ## Available model types
 
-There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
+There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
 
 Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
 
@@ -32,7 +32,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
 
 :::warning
 
-Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.
+Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later.
 
 :::
 
@@ -48,11 +48,19 @@ _\* Requires Frigate 0.15_
 
 ## Available label types
 
-Frigate+ models support a more relevant set of objects for security cameras. Currently, only the following objects are supported: `person`, `face`, `car`, `license_plate`, `amazon`, `ups`, `fedex`, `package`, `dog`, `cat`, `deer`. Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
+Frigate+ models support a more relevant set of objects for security cameras. Currently, the following objects are supported:
+
+- **People**: `person`, `face`
+- **Vehicles**: `car`, `motorcycle`, `bicycle`, `boat`, `license_plate`
+- **Delivery Logos**: `amazon`, `usps`, `ups`, `fedex`, `dhl`, `an_post`, `purolator`, `postnl`, `nzpost`, `postnord`, `gls`, `dpd`
+- **Animals**: `dog`, `cat`, `deer`, `horse`, `bird`, `raccoon`, `fox`, `bear`, `cow`, `squirrel`, `goat`, `rabbit`
+- **Other**: `package`, `waste_bin`, `bbq_grill`, `robot_lawnmower`, `umbrella`
+
+Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
 
 ### Label attributes
 
-Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
+Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
 
 In order to have Frigate start using these attribute labels, you will need to add them to the list of objects to track:
 
@@ -75,6 +83,6 @@ When using Frigate+ models, Frigate will choose the snapshot of a person object
 
 
 
-`amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects.
+Delivery logos such as `amazon`, `ups`, and `fedex` are used to automatically assign a sub label to car objects.
 
 
@@ -54,6 +54,17 @@ The most common reason for the PCIe Coral not being detected is that the driver
 - In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
 - For Ubuntu 22.04+ https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
 
+### Not detected on Raspberry Pi 5
+
+A kernel update to the RPi5 means an update to config.txt is required; see [the Raspberry Pi forum for more info](https://forums.raspberrypi.com/viewtopic.php?t=363682&sid=cb59b026a412f0dc041595951273a9ca&start=25).
+
+Specifically, add the following to config.txt:
+
+```
+dtoverlay=pciex1-compat-pi5,no-mip
+dtoverlay=pcie-32bit-dma-pi5
+```
+
 ## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU
 
 Coral Dual EdgeTPU is one card with two identical TPU cores. Each core has its own PCIe interface, and the motherboard needs to have two PCIe buses on the M.2 slot to make them both work.
@@ -17,6 +17,10 @@ ffmpeg:
     record: preset-record-generic-audio-aac
 ```
 
+### How can I get sound in live view?
+
+Audio is only supported for live view when go2rtc is configured; see [the live docs](../configuration/live.md) for more information.
+
 ### I can't view recordings in the Web UI.
 
 Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
@@ -98,3 +102,11 @@ docker run -d \
   -p 8555:8555/udp \
   ghcr.io/blakeblackshear/frigate:stable
 ```
 
+### My RTSP stream works fine in VLC, but it does not work when I put the same URL in my Frigate config. Is this a bug?
+
+No. Frigate uses the TCP protocol to connect to your camera's RTSP URL. VLC automatically switches between UDP and TCP depending on network conditions and stream availability. So a stream that works in VLC but not in Frigate is likely due to VLC selecting UDP as the transfer protocol.
+
+TCP ensures that all data packets arrive in the correct order. This is crucial for video recording, decoding, and stream processing, which is why Frigate enforces a TCP connection. UDP is faster but less reliable, as it does not guarantee packet delivery or order, and VLC does not have the same requirements as Frigate.
+
+You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.
@@ -3,7 +3,15 @@ id: recordings
 title: Troubleshooting Recordings
 ---
 
-### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
+## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
+
+You'll want to:
+
+- Make sure your camera's timestamp is masked out with a motion mask. Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
+- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
+- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
+
+## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
 
 This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
 
@@ -40,6 +48,7 @@ On linux, some helpful tools/commands in diagnosing would be:
 On modern linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to be the same and disable swapping by setting the following docker/podman run parameters:
 
 **Compose example**
 
 ```yaml
 version: "3.9"
 services:
@@ -54,6 +63,7 @@ services:
 ```
 
 **Run command example**
 
 ```
 --memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
 ```
docs/package-lock.json (generated, 7069 lines changed; diff not shown)

@@ -17,15 +17,15 @@
     "write-heading-ids": "docusaurus write-heading-ids"
   },
   "dependencies": {
-    "@docusaurus/core": "^3.5.2",
-    "@docusaurus/preset-classic": "^3.5.2",
-    "@docusaurus/theme-mermaid": "^3.5.2",
-    "@docusaurus/plugin-content-docs": "^3.5.2",
-    "@mdx-js/react": "^3.0.1",
+    "@docusaurus/core": "^3.6.3",
+    "@docusaurus/preset-classic": "^3.6.3",
+    "@docusaurus/theme-mermaid": "^3.6.3",
+    "@docusaurus/plugin-content-docs": "^3.6.3",
+    "@mdx-js/react": "^3.1.0",
     "clsx": "^2.1.1",
-    "docusaurus-plugin-openapi-docs": "^4.1.0",
-    "docusaurus-theme-openapi-docs": "^4.1.0",
-    "prism-react-renderer": "^2.4.0",
+    "docusaurus-plugin-openapi-docs": "^4.3.1",
+    "docusaurus-theme-openapi-docs": "^4.3.1",
+    "prism-react-renderer": "^2.4.1",
     "raw-loader": "^4.0.2",
     "react": "^18.3.1",
     "react-dom": "^18.3.1"
@@ -33,9 +33,11 @@ const sidebars: SidebarsConfig = {
       'configuration/object_detectors',
       'configuration/audio_detectors',
     ],
-    'Semantic Search': [
+    Classifiers: [
      'configuration/semantic_search',
       'configuration/genai',
+      'configuration/face_recognition',
+      'configuration/license_plate_recognition',
     ],
     Cameras: [
       'configuration/cameras',
@@ -82,6 +84,7 @@ const sidebars: SidebarsConfig = {
       items: frigateHttpApiSidebar,
     },
     'integrations/mqtt',
+    'configuration/metrics',
     'integrations/third_party_extensions',
   ],
   'Frigate+': [
@@ -3,12 +3,15 @@ import faulthandler
 import signal
 import sys
 import threading
+from typing import Union
 
+import ruamel.yaml
 from pydantic import ValidationError
 
 from frigate.app import FrigateApp
 from frigate.config import FrigateConfig
 from frigate.log import setup_logging
+from frigate.util.config import find_config_file
 
 
 def main() -> None:
@@ -42,10 +45,51 @@ def main() -> None:
         print("*************************************************************")
         print("*************************************************************")
         print("*** Config Validation Errors ***")
-        print("*************************************************************")
+        print("*************************************************************\n")
+
+        # Attempt to get the original config file for line number tracking
+        config_path = find_config_file()
+        with open(config_path, "r") as f:
+            yaml_config = ruamel.yaml.YAML()
+            yaml_config.preserve_quotes = True
+            full_config = yaml_config.load(f)
+
         for error in e.errors():
-            location = ".".join(str(item) for item in error["loc"])
-            print(f"{location}: {error['msg']}")
+            error_path = error["loc"]
+
+            current = full_config
+            line_number = "Unknown"
+            last_line_number = "Unknown"
+
+            try:
+                for i, part in enumerate(error_path):
+                    key: Union[int, str] = (
+                        int(part) if isinstance(part, str) and part.isdigit() else part
+                    )
+
+                    if isinstance(current, ruamel.yaml.comments.CommentedMap):
+                        current = current[key]
+                    elif isinstance(current, list):
+                        if isinstance(key, int):
+                            current = current[key]
+
+                    if hasattr(current, "lc"):
+                        last_line_number = current.lc.line
+
+                    if i == len(error_path) - 1:
+                        if hasattr(current, "lc"):
+                            line_number = current.lc.line
+                        else:
+                            line_number = last_line_number
+            except Exception as traverse_error:
+                print(f"Could not determine exact line number: {traverse_error}")
+
+            if current != full_config:
+                print(f"Line # : {line_number}")
+                print(f"Key : {' -> '.join(map(str, error_path))}")
+                print(f"Value : {error.get('input', '-')}")
+                print(f"Message : {error.get('msg', error.get('type', 'Unknown'))}\n")
 
         print("*************************************************************")
         print("*** End Config Validation Errors ***")
         print("*************************************************************")
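The traversal above leans on ruamel.yaml attaching an `lc` (line/column) attribute to the containers it parses. A small standalone sketch of that behavior:

```python
import ruamel.yaml

yaml = ruamel.yaml.YAML()
doc = yaml.load(
    "cameras:\n"
    "  front:\n"
    "    detect:\n"
    "      width: 1280\n"
)

# Mappings parsed by ruamel.yaml carry an `lc` attribute with zero-based
# line/column positions, which is what the error printer walks above.
front = doc["cameras"]["front"]
print(front.lc.line)  # line where the mapping under `front:` begins
print(doc["cameras"].lc.key("front"))  # (line, col) of the `front` key itself
```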
@@ -1,5 +1,6 @@
 """Main api runner."""
 
+import asyncio
 import copy
 import json
 import logging
@@ -7,30 +8,36 @@ import os
 import traceback
 from datetime import datetime, timedelta
 from functools import reduce
+from io import StringIO
 from typing import Any, Optional
 
+import aiofiles
 import requests
+import ruamel.yaml
 from fastapi import APIRouter, Body, Path, Request, Response
 from fastapi.encoders import jsonable_encoder
 from fastapi.params import Depends
-from fastapi.responses import JSONResponse, PlainTextResponse
+from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
 from markupsafe import escape
 from peewee import operator
+from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
+from pydantic import ValidationError
 
 from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
 from frigate.api.defs.request.app_body import AppConfigSetBody
 from frigate.api.defs.tags import Tags
 from frigate.config import FrigateConfig
-from frigate.const import CONFIG_DIR
 from frigate.models import Event, Timeline
 from frigate.util.builtin import (
     clean_camera_user_pass,
     get_tz_modifiers,
     update_yaml_from_url,
 )
+from frigate.util.config import find_config_file
 from frigate.util.services import (
     ffprobe_stream,
     get_nvidia_driver_info,
+    process_logs,
     restart_frigate,
     vainfo_hwaccel,
 )
@@ -105,6 +112,12 @@ def stats_history(request: Request, keys: str = None):
     return JSONResponse(content=request.app.stats_emitter.get_stats_history(keys))
 
 
+@router.get("/metrics")
+def metrics():
+    """Expose Prometheus metrics endpoint"""
+    return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST)
+
+
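With the route above in place, any Prometheus-compatible scraper can pull metrics from the API. A quick way to eyeball the exposition output, assuming the API is reachable on localhost:

```python
import requests

# Fetch the Prometheus exposition text from the new endpoint.
# Host and port are assumptions; adjust for your deployment.
resp = requests.get("http://localhost:5000/api/metrics", timeout=5)
resp.raise_for_status()

# The format is plain text: one `name{labels} value` sample per line.
for line in resp.text.splitlines()[:10]:
    print(line)
```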
@router.get("/config")
|
@router.get("/config")
|
||||||
def config(request: Request):
|
def config(request: Request):
|
||||||
config_obj: FrigateConfig = request.app.frigate_config
|
config_obj: FrigateConfig = request.app.frigate_config
|
||||||
@ -134,9 +147,29 @@ def config(request: Request):
|
|||||||
for zone_name, zone in config_obj.cameras[camera_name].zones.items():
|
for zone_name, zone in config_obj.cameras[camera_name].zones.items():
|
||||||
camera_dict["zones"][zone_name]["color"] = zone.color
|
camera_dict["zones"][zone_name]["color"] = zone.color
|
||||||
|
|
||||||
|
# remove go2rtc stream passwords
|
||||||
|
go2rtc: dict[str, any] = config_obj.go2rtc.model_dump(
|
||||||
|
mode="json", warnings="none", exclude_none=True
|
||||||
|
)
|
||||||
|
for stream_name, stream in go2rtc.get("streams", {}).items():
|
||||||
|
if stream is None:
|
||||||
|
continue
|
||||||
|
if isinstance(stream, str):
|
||||||
|
cleaned = clean_camera_user_pass(stream)
|
||||||
|
else:
|
||||||
|
cleaned = []
|
||||||
|
|
||||||
|
for item in stream:
|
||||||
|
cleaned.append(clean_camera_user_pass(item))
|
||||||
|
|
||||||
|
config["go2rtc"]["streams"][stream_name] = cleaned
|
||||||
|
|
||||||
config["plus"] = {"enabled": request.app.frigate_config.plus_api.is_active()}
|
config["plus"] = {"enabled": request.app.frigate_config.plus_api.is_active()}
|
||||||
config["model"]["colormap"] = config_obj.model.colormap
|
config["model"]["colormap"] = config_obj.model.colormap
|
||||||
|
config["model"]["all_attributes"] = config_obj.model.all_attributes
|
||||||
|
config["model"]["non_logo_attributes"] = config_obj.model.non_logo_attributes
|
||||||
|
|
||||||
|
# use merged labelamp
|
||||||
for detector_config in config["detectors"].values():
|
for detector_config in config["detectors"].values():
|
||||||
detector_config["model"]["labelmap"] = (
|
detector_config["model"]["labelmap"] = (
|
||||||
request.app.frigate_config.model.merged_labelmap
|
request.app.frigate_config.model.merged_labelmap
|
||||||
@@ -147,13 +180,7 @@ def config(request: Request):
 
 @router.get("/config/raw")
 def config_raw():
-    config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
-
-    # Check if we can use .yaml instead of .yml
-    config_file_yaml = config_file.replace(".yml", ".yaml")
-
-    if os.path.isfile(config_file_yaml):
-        config_file = config_file_yaml
+    config_file = find_config_file()
 
     if not os.path.isfile(config_file):
         return JSONResponse(
@@ -173,7 +200,6 @@ def config_raw():
 @router.post("/config/save")
 def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
     new_config = body.decode()
-
     if not new_config:
         return JSONResponse(
             content=(
@@ -184,13 +210,64 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
 
     # Validate the config schema
     try:
+        # Use ruamel to parse and preserve line numbers
+        yaml_config = ruamel.yaml.YAML()
+        yaml_config.preserve_quotes = True
+        full_config = yaml_config.load(StringIO(new_config))
+
         FrigateConfig.parse_yaml(new_config)
+    except ValidationError as e:
+        error_message = []
+
+        for error in e.errors():
+            error_path = error["loc"]
+            current = full_config
+            line_number = "Unknown"
+            last_line_number = "Unknown"
+
+            try:
+                for i, part in enumerate(error_path):
+                    key = int(part) if isinstance(part, str) and part.isdigit() else part
+
+                    if isinstance(current, ruamel.yaml.comments.CommentedMap):
+                        current = current[key]
+                    elif isinstance(current, list):
+                        current = current[key]
+
+                    if hasattr(current, "lc"):
+                        last_line_number = current.lc.line
+
+                    if i == len(error_path) - 1:
+                        if hasattr(current, "lc"):
+                            line_number = current.lc.line
+                        else:
+                            line_number = last_line_number
+            except Exception:
+                line_number = "Unable to determine"
+
+            error_message.append(
+                f"Line {line_number}: {' -> '.join(map(str, error_path))} - {error.get('msg', error.get('type', 'Unknown'))}"
+            )
+
+        return JSONResponse(
+            content=(
+                {
+                    "success": False,
+                    "message": "Your configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n"
+                    + "\n".join(error_message),
+                }
+            ),
+            status_code=400,
+        )
+
     except Exception:
         return JSONResponse(
             content=(
                 {
                     "success": False,
-                    "message": f"\nConfig Error:\n\n{escape(str(traceback.format_exc()))}",
+                    "message": f"\nYour configuration is invalid.\nSee the official documentation at docs.frigate.video.\n\n{escape(str(traceback.format_exc()))}",
                 }
             ),
             status_code=400,
@@ -198,13 +275,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
 
     # Save the config to file
     try:
-        config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
-
-        # Check if we can use .yaml instead of .yml
-        config_file_yaml = config_file.replace(".yml", ".yaml")
-
-        if os.path.isfile(config_file_yaml):
-            config_file = config_file_yaml
+        config_file = find_config_file()
 
         with open(config_file, "w") as f:
             f.write(new_config)
@@ -253,13 +324,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
 
 @router.put("/config/set")
 def config_set(request: Request, body: AppConfigSetBody):
-    config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")
-
-    # Check if we can use .yaml instead of .yml
-    config_file_yaml = config_file.replace(".yml", ".yaml")
-
-    if os.path.isfile(config_file_yaml):
-        config_file = config_file_yaml
+    config_file = find_config_file()
 
     with open(config_file, "r") as f:
         old_raw_config = f.read()
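The repeated inline `.yml`/`.yaml` checks collapse into the shared `find_config_file()` helper. Its exact implementation lives in `frigate/util/config.py` and is not part of this diff; a sketch of the equivalent logic, inferred from the code it replaces:

```python
import os


def find_config_file() -> str:
    # Inferred sketch, not the actual implementation: prefer the configured
    # (or default) .yml path, falling back to the .yaml spelling on disk.
    config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")

    if not os.path.isfile(config_file):
        yaml_variant = config_file.replace(".yml", ".yaml")
        if os.path.isfile(yaml_variant):
            return yaml_variant

    return config_file
```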
@@ -393,9 +458,10 @@ def nvinfo():
 
 
 @router.get("/logs/{service}", tags=[Tags.logs])
-def logs(
+async def logs(
     service: str = Path(enum=["frigate", "nginx", "go2rtc"]),
     download: Optional[str] = None,
+    stream: Optional[bool] = False,
     start: Optional[int] = 0,
     end: Optional[int] = None,
 ):
@@ -414,6 +480,27 @@ def logs(
             status_code=500,
         )
 
+    async def stream_logs(file_path: str):
+        """Asynchronously stream log lines."""
+        buffer = ""
+        try:
+            async with aiofiles.open(file_path, "r") as file:
+                # Seek to the end so only lines written after the request
+                # starts are streamed to the client.
+                await file.seek(0, 2)
+                while True:
+                    line = await file.readline()
+                    if line:
+                        buffer += line
+                        # Process logs only when there are enough lines in the buffer
+                        if "\n" in buffer:
+                            _, processed_lines = process_logs(buffer, service)
+                            buffer = ""
+                            for processed_line in processed_lines:
+                                yield f"{processed_line}\n"
+                    else:
+                        await asyncio.sleep(0.1)
+        except FileNotFoundError:
+            yield "Log file not found.\n"
+
     log_locations = {
         "frigate": "/dev/shm/logs/frigate/current",
         "go2rtc": "/dev/shm/logs/go2rtc/current",
@@ -430,48 +517,17 @@ def logs(
     if download:
         return download_logs(service_location)
 
+    if stream:
+        return StreamingResponse(stream_logs(service_location), media_type="text/plain")
+
+    # For full logs initially
     try:
-        file = open(service_location, "r")
-        contents = file.read()
-        file.close()
-
-        # use the start timestamp to group logs together
-        logLines = []
-        keyLength = 0
-        dateEnd = 0
-        currentKey = ""
-        currentLine = ""
-
-        for rawLine in contents.splitlines():
-            cleanLine = rawLine.strip()
-
-            if len(cleanLine) < 10:
-                continue
-
-            # handle cases where S6 does not include date in log line
-            if " " not in cleanLine:
-                cleanLine = f"{datetime.now()} {cleanLine}"
-
-            if dateEnd == 0:
-                dateEnd = cleanLine.index(" ")
-                keyLength = dateEnd - (6 if service_location == "frigate" else 0)
-
-            newKey = cleanLine[0:keyLength]
-
-            if newKey == currentKey:
-                currentLine += f"\n{cleanLine[dateEnd:].strip()}"
-                continue
-            else:
-                if len(currentLine) > 0:
-                    logLines.append(currentLine)
-
-                currentKey = newKey
-                currentLine = cleanLine
-
-        logLines.append(currentLine)
-
+        async with aiofiles.open(service_location, "r") as file:
+            contents = await file.read()
+        total_lines, log_lines = process_logs(contents, service, start, end)
         return JSONResponse(
-            content={"totalLines": len(logLines), "lines": logLines[start:end]},
+            content={"totalLines": total_lines, "lines": log_lines},
             status_code=200,
         )
     except FileNotFoundError as e:
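Clients can consume the new streaming mode by keeping the HTTP connection open. A minimal sketch that tails Frigate's own log over the endpoint, assuming the API is reachable on localhost:

```python
import requests

# Tail logs via `?stream=true`; the response body never ends, so iterate
# over it line by line. Host/port are assumptions for a local install.
with requests.get(
    "http://localhost:5000/api/logs/frigate",
    params={"stream": "true"},
    stream=True,
    timeout=(5, None),  # connect timeout only; reads block on new lines
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if line:
            print(line)
```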
@@ -329,7 +329,7 @@ def login(request: Request, body: AppPostLoginBody):
     try:
         db_user: User = User.get_by_id(user)
     except DoesNotExist:
-        return JSONResponse(content={"message": "Login failed"}, status_code=400)
+        return JSONResponse(content={"message": "Login failed"}, status_code=401)
 
     password_hash = db_user.password_hash
     if verify_password(password, password_hash):
@@ -340,7 +340,7 @@ def login(request: Request, body: AppPostLoginBody):
             response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
         )
         return response
-    return JSONResponse(content={"message": "Login failed"}, status_code=400)
+    return JSONResponse(content={"message": "Login failed"}, status_code=401)
 
 
 @router.get("/users")
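With failed logins now answering 401 Unauthorized instead of 400, callers can branch on the status code. A sketch against the login route; the address and credentials are placeholders:

```python
import requests

resp = requests.post(
    "http://localhost:8971/api/login",
    json={"user": "admin", "password": "example-password"},  # placeholders
    timeout=5,
)

if resp.status_code == 401:
    # 401 now consistently means bad credentials (or an unknown user).
    print("login failed")
else:
    resp.raise_for_status()
    print("logged in; cookies:", list(resp.cookies.keys()))
```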
frigate/api/classification.py (new file, 178 lines)
@@ -0,0 +1,178 @@
+"""Object classification APIs."""
+
+import logging
+import os
+import random
+import shutil
+import string
+
+from fastapi import APIRouter, Request, UploadFile
+from fastapi.responses import JSONResponse
+from pathvalidate import sanitize_filename
+
+from frigate.api.defs.tags import Tags
+from frigate.const import FACE_DIR
+from frigate.embeddings import EmbeddingsContext
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(tags=[Tags.events])
+
+
+@router.get("/faces")
+def get_faces():
+    face_dict: dict[str, list[str]] = {}
+
+    for name in os.listdir(FACE_DIR):
+        face_dir = os.path.join(FACE_DIR, name)
+
+        if not os.path.isdir(face_dir):
+            continue
+
+        face_dict[name] = []
+
+        for file in sorted(
+            os.listdir(face_dir),
+            key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
+            reverse=True,
+        ):
+            face_dict[name].append(file)
+
+    return JSONResponse(status_code=200, content=face_dict)
+
+
+@router.post("/faces/reprocess")
+def reclassify_face(request: Request, body: dict = None):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
+    json: dict[str, any] = body or {}
+    training_file = os.path.join(
+        FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
+    )
+
+    if not training_file or not os.path.isfile(training_file):
+        return JSONResponse(
+            content=(
+                {
+                    "success": False,
+                    "message": f"Invalid filename or no file exists: {training_file}",
+                }
+            ),
+            status_code=404,
+        )
+
+    context: EmbeddingsContext = request.app.embeddings
+    response = context.reprocess_face(training_file)
+
+    return JSONResponse(
+        content=response,
+        status_code=200,
+    )
+
+
+@router.post("/faces/train/{name}/classify")
+def train_face(request: Request, name: str, body: dict = None):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
+    json: dict[str, any] = body or {}
+    training_file = os.path.join(
+        FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
+    )
+
+    if not training_file or not os.path.isfile(training_file):
+        return JSONResponse(
+            content=(
+                {
+                    "success": False,
+                    "message": f"Invalid filename or no file exists: {training_file}",
+                }
+            ),
+            status_code=404,
+        )
+
+    sanitized_name = sanitize_filename(name)
+    rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
+    new_name = f"{sanitized_name}-{rand_id}.webp"
+    new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}")
+    shutil.move(training_file, new_file)
+
+    context: EmbeddingsContext = request.app.embeddings
+    context.clear_face_classifier()
+
+    return JSONResponse(
+        content=(
+            {
+                "success": True,
+                "message": f"Successfully saved {training_file} as {new_name}.",
+            }
+        ),
+        status_code=200,
+    )
+
+
+@router.post("/faces/{name}/create")
+async def create_face(request: Request, name: str):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
+    os.makedirs(
+        os.path.join(FACE_DIR, sanitize_filename(name.replace(" ", "_"))), exist_ok=True
+    )
+    return JSONResponse(
+        status_code=200,
+        content={"success": True, "message": "Successfully created face folder."},
+    )
+
+
+@router.post("/faces/{name}/register")
+async def register_face(request: Request, name: str, file: UploadFile):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
+    context: EmbeddingsContext = request.app.embeddings
+    result = context.register_face(name, await file.read())
+    return JSONResponse(
+        status_code=200 if result.get("success", True) else 400,
+        content=result,
+    )
+
+
+@router.post("/faces/{name}/delete")
+def deregister_faces(request: Request, name: str, body: dict = None):
+    if not request.app.frigate_config.face_recognition.enabled:
+        return JSONResponse(
+            status_code=400,
+            content={"message": "Face recognition is not enabled.", "success": False},
+        )
+
+    json: dict[str, any] = body or {}
+    list_of_ids = json.get("ids", "")
+
+    if not list_of_ids or len(list_of_ids) == 0:
+        return JSONResponse(
+            content=({"success": False, "message": "Not a valid list of ids"}),
+            status_code=404,
+        )
+
+    context: EmbeddingsContext = request.app.embeddings
+    context.delete_face_ids(
+        name, map(lambda file: sanitize_filename(file), list_of_ids)
+    )
+    return JSONResponse(
+        content=({"success": True, "message": "Successfully deleted faces."}),
+        status_code=200,
+    )
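The new face routes can be exercised with a multipart upload. A sketch that registers an image for a named face, assuming face recognition is enabled and the file exists locally:

```python
import requests

# Upload a local JPEG to register it against the face named "alice".
# Host, port, and filename are assumptions for a local install.
with open("alice.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/api/faces/alice/register",
        files={"file": ("alice.jpg", f, "image/jpeg")},
        timeout=10,
    )

print(resp.status_code, resp.json())
```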
@@ -20,6 +20,7 @@ class MediaLatestFrameQueryParams(BaseModel):
     regions: Optional[int] = None
     quality: Optional[int] = 70
     height: Optional[int] = None
+    store: Optional[int] = None
 
 
 class MediaEventsSnapshotQueryParams(BaseModel):
@@ -40,3 +41,8 @@ class MediaMjpegFeedQueryParams(BaseModel):
     mask: Optional[int] = None
     motion: Optional[int] = None
     regions: Optional[int] = None
+
+
+class MediaRecordingsSummaryQueryParams(BaseModel):
+    timezone: str = "utc"
+    cameras: Optional[str] = "all"
@@ -8,6 +8,9 @@ class EventsSubLabelBody(BaseModel):
     subLabelScore: Optional[float] = Field(
         title="Score for sub label", default=None, gt=0.0, le=1.0
     )
+    camera: Optional[str] = Field(
+        title="Camera this object is detected on.", default=None
+    )
 
 
 class EventsDescriptionBody(BaseModel):
@@ -10,4 +10,5 @@ class Tags(Enum):
     review = "Review"
     export = "Export"
     events = "Events"
+    classification = "classification"
     auth = "Auth"
@@ -954,22 +954,41 @@ def set_sub_label(
     try:
         event: Event = Event.get(Event.id == event_id)
     except DoesNotExist:
-        return JSONResponse(
-            content=({"success": False, "message": "Event " + event_id + " not found"}),
+        if not body.camera:
+            return JSONResponse(
+                content=(
+                    {
+                        "success": False,
+                        "message": "Event "
+                        + event_id
+                        + " not found and camera is not provided.",
+                    }
+                ),
+                status_code=404,
+            )
+
+        event = None
+
+    if request.app.detected_frames_processor:
+        tracked_obj: TrackedObject = (
+            request.app.detected_frames_processor.camera_states[
+                event.camera if event else body.camera
+            ].tracked_objects.get(event_id)
+        )
+    else:
+        tracked_obj = None
+
+    if not event and not tracked_obj:
+        return JSONResponse(
+            content=(
+                {"success": False, "message": "Event " + event_id + " not found."}
+            ),
             status_code=404,
         )
 
     new_sub_label = body.subLabel
     new_score = body.subLabelScore
-
-    if not event.end_time:
-        # update tracked object
-        tracked_obj: TrackedObject = (
-            request.app.detected_frames_processor.camera_states[
-                event.camera
-            ].tracked_objects.get(event.id)
-        )
-
     if tracked_obj:
         tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
 
@@ -978,6 +997,7 @@ def set_sub_label(
             data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
         ).where(Timeline.source_id == event_id).execute()
 
+    if event:
         event.sub_label = new_sub_label
 
         if new_score:
@@ -986,6 +1006,7 @@ def set_sub_label(
             event.data = data
 
         event.save()
 
     return JSONResponse(
         content=(
             {
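The new optional `camera` field means a sub label can be set on an object that is still in progress and has no database row yet. A sketch of such a request; the host, event id, and names are placeholders:

```python
import requests

resp = requests.post(
    "http://localhost:5000/api/events/1718240400.123456-abc123/sub_label",
    json={
        "subLabel": "mail carrier",
        "subLabelScore": 0.9,
        "camera": "front",  # lets the API find the in-progress tracked object
    },
    timeout=5,
)
print(resp.status_code, resp.json())
```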
@@ -11,7 +11,16 @@ from starlette_context import middleware, plugins
 from starlette_context.plugins import Plugin
 
 from frigate.api import app as main_app
-from frigate.api import auth, event, export, media, notification, preview, review
+from frigate.api import (
+    auth,
+    classification,
+    event,
+    export,
+    media,
+    notification,
+    preview,
+    review,
+)
 from frigate.api.auth import get_jwt_secret, limiter
 from frigate.comms.event_metadata_updater import (
     EventMetadataPublisher,
@@ -26,14 +35,13 @@ from frigate.storage import StorageMaintainer
 logger = logging.getLogger(__name__)
 
 
-def check_csrf(request: Request):
+def check_csrf(request: Request) -> bool:
     if request.method in ["GET", "HEAD", "OPTIONS", "TRACE"]:
-        pass
+        return True
     if "origin" in request.headers and "x-csrf-token" not in request.headers:
-        return JSONResponse(
-            content={"success": False, "message": "Missing CSRF header"},
-            status_code=401,
-        )
+        return False
+
+    return True
 
 
 # Used to retrieve the remote-user header: https://starlette-context.readthedocs.io/en/latest/plugins.html#easy-mode
@@ -71,7 +79,12 @@ def create_fastapi_app(
     @app.middleware("http")
     async def frigate_middleware(request: Request, call_next):
         # Before request
-        check_csrf(request)
+        if not check_csrf(request):
+            return JSONResponse(
+                content={"success": False, "message": "Missing CSRF header"},
+                status_code=401,
+            )
 
         if database.is_closed():
             database.connect()
 
@@ -87,7 +100,11 @@ def create_fastapi_app(
     logger.info("FastAPI started")
 
     # Rate limiter (used for login endpoint)
-    auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit or "")
+    if frigate_config.auth.failed_login_rate_limit is None:
+        limiter.enabled = False
+    else:
+        auth.rateLimiter.set_limit(frigate_config.auth.failed_login_rate_limit)
 
     app.state.limiter = limiter
     app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
     app.add_middleware(SlowAPIMiddleware)
@@ -95,6 +112,7 @@ def create_fastapi_app(
     # Routes
     # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
     app.include_router(auth.router)
+    app.include_router(classification.router)
     app.include_router(review.router)
     app.include_router(main_app.router)
     app.include_router(preview.router)
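The CSRF refactor moves the rejection out of `check_csrf()` (now a plain predicate) and into the middleware. A self-contained sketch of the same pattern, runnable outside Frigate:

```python
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.testclient import TestClient

app = FastAPI()


def check_csrf(request: Request) -> bool:
    # Mirror of the refactored predicate: safe methods pass, cross-origin
    # state-changing requests must carry the x-csrf-token header.
    if request.method in ["GET", "HEAD", "OPTIONS", "TRACE"]:
        return True
    if "origin" in request.headers and "x-csrf-token" not in request.headers:
        return False
    return True


@app.middleware("http")
async def csrf_middleware(request: Request, call_next):
    if not check_csrf(request):
        return JSONResponse(
            content={"success": False, "message": "Missing CSRF header"},
            status_code=401,
        )
    return await call_next(request)


@app.post("/echo")
async def echo():
    return {"ok": True}


client = TestClient(app)
# Cross-origin POST without the header is rejected by the middleware...
assert client.post("/echo", headers={"origin": "http://other"}).status_code == 401
# ...and accepted once the header is present.
assert (
    client.post(
        "/echo", headers={"origin": "http://other", "x-csrf-token": "1"}
    ).status_code
    == 200
)
```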
@ -25,6 +25,7 @@ from frigate.api.defs.query.media_query_parameters import (
|
|||||||
MediaEventsSnapshotQueryParams,
|
MediaEventsSnapshotQueryParams,
|
||||||
MediaLatestFrameQueryParams,
|
MediaLatestFrameQueryParams,
|
||||||
MediaMjpegFeedQueryParams,
|
MediaMjpegFeedQueryParams,
|
||||||
|
MediaRecordingsSummaryQueryParams,
|
||||||
)
|
)
|
||||||
from frigate.api.defs.tags import Tags
|
from frigate.api.defs.tags import Tags
|
||||||
from frigate.config import FrigateConfig
|
from frigate.config import FrigateConfig
|
||||||
@ -133,6 +134,15 @@ def latest_frame(
|
|||||||
"regions": params.regions,
|
"regions": params.regions,
|
||||||
}
|
}
|
||||||
quality = params.quality
|
quality = params.quality
|
||||||
|
mime_type = extension
|
||||||
|
|
||||||
|
if extension == "png":
|
||||||
|
quality_params = None
|
||||||
|
elif extension == "webp":
|
||||||
|
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality]
|
||||||
|
else:
|
||||||
|
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
|
||||||
|
mime_type = "jpeg"
|
||||||
|
|
||||||
if camera_name in request.app.frigate_config.cameras:
|
if camera_name in request.app.frigate_config.cameras:
|
||||||
frame = frame_processor.get_current_frame(camera_name, draw_options)
|
frame = frame_processor.get_current_frame(camera_name, draw_options)
|
||||||
@ -173,13 +183,16 @@ def latest_frame(

         frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

-        ret, img = cv2.imencode(
-            f".{extension}", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality]
-        )
+        _, img = cv2.imencode(f".{extension}", frame, quality_params)
         return Response(
             content=img.tobytes(),
-            media_type=f"image/{extension}",
-            headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
+            media_type=f"image/{mime_type}",
+            headers={
+                "Content-Type": f"image/{mime_type}",
+                "Cache-Control": "no-store"
+                if not params.store
+                else "private, max-age=60",
+            },
         )
     elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
         frame = cv2.cvtColor(
@ -192,13 +205,16 @@ def latest_frame(

         frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

-        ret, img = cv2.imencode(
-            f".{extension}", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality]
-        )
+        _, img = cv2.imencode(f".{extension}", frame, quality_params)
         return Response(
             content=img.tobytes(),
-            media_type=f"image/{extension}",
-            headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
+            media_type=f"image/{mime_type}",
+            headers={
+                "Content-Type": f"image/{mime_type}",
+                "Cache-Control": "no-store"
+                if not params.store
+                else "private, max-age=60",
+            },
         )
     else:
         return JSONResponse(
@ -241,6 +257,7 @@ def get_snapshot_from_recording(
         recording: Recordings = recording_query.get()
         time_in_segment = frame_time - recording.start_time
         codec = "png" if format == "png" else "mjpeg"
+        mime_type = "png" if format == "png" else "jpeg"
         config: FrigateConfig = request.app.frigate_config

         image_data = get_image_from_recording(
@ -257,7 +274,7 @@ def get_snapshot_from_recording(
                 ),
                 status_code=404,
             )
-        return Response(image_data, headers={"Content-Type": f"image/{format}"})
+        return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
     except DoesNotExist:
         return JSONResponse(
             content={
@ -356,6 +373,50 @@ def get_recordings_storage_usage(request: Request):
     return JSONResponse(content=camera_usages)


+@router.get("/recordings/summary")
+def all_recordings_summary(params: MediaRecordingsSummaryQueryParams = Depends()):
+    """Returns true/false by day indicating if recordings exist"""
+    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
+
+    cameras = params.cameras
+
+    query = (
+        Recordings.select(
+            fn.strftime(
+                "%Y-%m-%d",
+                fn.datetime(
+                    Recordings.start_time + seconds_offset,
+                    "unixepoch",
+                    hour_modifier,
+                    minute_modifier,
+                ),
+            ).alias("day")
+        )
+        .group_by(
+            fn.strftime(
+                "%Y-%m-%d",
+                fn.datetime(
+                    Recordings.start_time + seconds_offset,
+                    "unixepoch",
+                    hour_modifier,
+                    minute_modifier,
+                ),
+            )
+        )
+        .order_by(Recordings.start_time.desc())
+    )
+
+    if cameras != "all":
+        query = query.where(Recordings.camera << cameras.split(","))
+
+    print(query)
+
+    recording_days = query.namedtuples()
+    days = {day.day: True for day in recording_days}
+
+    return JSONResponse(content=days)
+
+
 @router.get("/{camera_name}/recordings/summary")
 def recordings_summary(camera_name: str, timezone: str = "utc"):
     """Returns hourly summary for recordings of given camera"""
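A hedged usage sketch for the new endpoint; the host, port, and /api prefix are assumptions about a typical Frigate deployment:

import requests

# days with any recordings, keyed "YYYY-MM-DD" -> True
resp = requests.get(
    "http://frigate.local:5000/api/recordings/summary",
    params={"timezone": "America/New_York", "cameras": "front_door,back_yard"},
)
print(resp.json())  # e.g. {"2024-06-01": True, "2024-05-31": True, ...}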
@ -110,6 +110,28 @@ def review(params: ReviewQueryParams = Depends()):
     return JSONResponse(content=[r for r in review])


+@router.get("/review_ids", response_model=list[ReviewSegmentResponse])
+def review_ids(ids: str):
+    ids = ids.split(",")
+
+    if not ids:
+        return JSONResponse(
+            content=({"success": False, "message": "Valid list of ids must be sent"}),
+            status_code=400,
+        )
+
+    try:
+        reviews = (
+            ReviewSegment.select().where(ReviewSegment.id << ids).dicts().iterator()
+        )
+        return JSONResponse(list(reviews))
+    except Exception:
+        return JSONResponse(
+            content=({"success": False, "message": "Review segments not found"}),
+            status_code=400,
+        )
+
+
 @router.get("/review/summary", response_model=ReviewSummaryResponse)
 def review_summary(params: ReviewSummaryQueryParams = Depends()):
     hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
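Likewise, a hedged usage sketch for the new review_ids endpoint; the base URL is an assumption, and ids is a comma-separated string as the handler expects:

import requests

resp = requests.get(
    "http://frigate.local:5000/api/review_ids",
    params={"ids": "1718291.123456-abcd1,1718292.654321-efgh2"},
)
print(resp.status_code, resp.json())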
@ -490,8 +512,6 @@ def set_not_reviewed(review_id: str):
     review.save()

     return JSONResponse(
-        content=(
-            {"success": True, "message": "Set Review " + review_id + " as not viewed"}
-        ),
+        content=({"success": True, "message": f"Set Review {review_id} as not viewed"}),
         status_code=200,
     )
@ -34,10 +34,12 @@ from frigate.const import (
     CLIPS_DIR,
     CONFIG_DIR,
     EXPORT_DIR,
+    FACE_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
     SHM_FRAMES_VAR,
 )
+from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.embeddings import EmbeddingsContext, manage_embeddings
 from frigate.events.audio import AudioProcessor
@ -88,6 +90,9 @@ class FrigateApp:
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
         self.log_queue: Queue = mp.Queue()
         self.camera_metrics: dict[str, CameraMetrics] = {}
+        self.embeddings_metrics: DataProcessorMetrics | None = (
+            DataProcessorMetrics() if config.semantic_search.enabled else None
+        )
         self.ptz_metrics: dict[str, PTZMetrics] = {}
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
@ -96,14 +101,19 @@ class FrigateApp:
         self.config = config

     def ensure_dirs(self) -> None:
-        for d in [
+        dirs = [
             CONFIG_DIR,
             RECORD_DIR,
             f"{CLIPS_DIR}/cache",
             CACHE_DIR,
             MODEL_CACHE_DIR,
             EXPORT_DIR,
-        ]:
+        ]
+
+        if self.config.face_recognition.enabled:
+            dirs.append(FACE_DIR)
+
+        for d in dirs:
             if not os.path.exists(d) and not os.path.islink(d):
                 logger.info(f"Creating directory: {d}")
                 os.makedirs(d)
@ -229,7 +239,10 @@ class FrigateApp:
         embedding_process = util.Process(
             target=manage_embeddings,
             name="embeddings_manager",
-            args=(self.config,),
+            args=(
+                self.config,
+                self.embeddings_metrics,
+            ),
         )
         embedding_process.daemon = True
         self.embedding_process = embedding_process
@ -437,7 +450,7 @@ class FrigateApp:
             # pre-create shms
             for i in range(shm_frame_count):
                 frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
-                self.frame_manager.create(f"{config.name}_{i}", frame_size)
+                self.frame_manager.create(f"{config.name}_frame{i}", frame_size)

             capture_process = util.Process(
                 target=capture_camera,
@ -491,7 +504,11 @@ class FrigateApp:
         self.stats_emitter = StatsEmitter(
             self.config,
             stats_init(
-                self.config, self.camera_metrics, self.detectors, self.processes
+                self.config,
+                self.camera_metrics,
+                self.embeddings_metrics,
+                self.detectors,
+                self.processes,
             ),
             self.stop_event,
         )
130 frigate/camera/activity_manager.py Normal file
@ -0,0 +1,130 @@
+"""Manage camera activity and updating listeners."""
+
+from collections import Counter
+from typing import Callable
+
+from frigate.config.config import FrigateConfig
+
+
+class CameraActivityManager:
+    def __init__(
+        self, config: FrigateConfig, publish: Callable[[str, any], None]
+    ) -> None:
+        self.config = config
+        self.publish = publish
+        self.last_camera_activity: dict[str, dict[str, any]] = {}
+        self.camera_all_object_counts: dict[str, Counter] = {}
+        self.camera_active_object_counts: dict[str, Counter] = {}
+        self.zone_all_object_counts: dict[str, Counter] = {}
+        self.zone_active_object_counts: dict[str, Counter] = {}
+        self.all_zone_labels: dict[str, set[str]] = {}
+
+        for camera_config in config.cameras.values():
+            if not camera_config.enabled:
+                continue
+
+            self.last_camera_activity[camera_config.name] = {}
+            self.camera_all_object_counts[camera_config.name] = Counter()
+            self.camera_active_object_counts[camera_config.name] = Counter()
+
+            for zone, zone_config in camera_config.zones.items():
+                if zone not in self.all_zone_labels:
+                    self.zone_all_object_counts[zone] = Counter()
+                    self.zone_active_object_counts[zone] = Counter()
+                    self.all_zone_labels[zone] = set()
+
+                self.all_zone_labels[zone].update(zone_config.objects)
+
+    def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
+        all_objects: list[dict[str, any]] = []
+
+        for camera in new_activity.keys():
+            new_objects = new_activity[camera].get("objects", [])
+            all_objects.extend(new_objects)
+
+            if self.last_camera_activity.get(camera, {}).get("objects") != new_objects:
+                self.compare_camera_activity(camera, new_objects)
+
+        # run through every zone, getting a count of objects in that zone right now
+        for zone, labels in self.all_zone_labels.items():
+            all_zone_objects = Counter(
+                obj["label"].replace("-verified", "")
+                for obj in all_objects
+                if zone in obj["current_zones"]
+            )
+            active_zone_objects = Counter(
+                obj["label"].replace("-verified", "")
+                for obj in all_objects
+                if zone in obj["current_zones"] and not obj["stationary"]
+            )
+            any_changed = False
+
+            # run through each object and check what topics need to be updated for this zone
+            for label in labels:
+                new_count = all_zone_objects[label]
+                new_active_count = active_zone_objects[label]
+
+                if (
+                    new_count != self.zone_all_object_counts[zone][label]
+                    or label not in self.zone_all_object_counts[zone]
+                ):
+                    any_changed = True
+                    self.publish(f"{zone}/{label}", new_count)
+                    self.zone_all_object_counts[zone][label] = new_count
+
+                if (
+                    new_active_count != self.zone_active_object_counts[zone][label]
+                    or label not in self.zone_active_object_counts[zone]
+                ):
+                    any_changed = True
+                    self.publish(f"{zone}/{label}/active", new_active_count)
+                    self.zone_active_object_counts[zone][label] = new_active_count
+
+            if any_changed:
+                self.publish(f"{zone}/all", sum(list(all_zone_objects.values())))
+                self.publish(
+                    f"{zone}/all/active", sum(list(active_zone_objects.values()))
+                )
+
+        self.last_camera_activity = new_activity
+
+    def compare_camera_activity(
+        self, camera: str, new_activity: dict[str, any]
+    ) -> None:
+        all_objects = Counter(
+            obj["label"].replace("-verified", "") for obj in new_activity
+        )
+        active_objects = Counter(
+            obj["label"].replace("-verified", "")
+            for obj in new_activity
+            if not obj["stationary"]
+        )
+        any_changed = False
+
+        # run through each object and check what topics need to be updated
+        for label in self.config.cameras[camera].objects.track:
+            if label in self.config.model.non_logo_attributes:
+                continue
+
+            new_count = all_objects[label]
+            new_active_count = active_objects[label]
+
+            if (
+                new_count != self.camera_all_object_counts[camera][label]
+                or label not in self.camera_all_object_counts[camera]
+            ):
+                any_changed = True
+                self.publish(f"{camera}/{label}", new_count)
+                self.camera_all_object_counts[camera][label] = new_count
+
+            if (
+                new_active_count != self.camera_active_object_counts[camera][label]
+                or label not in self.camera_active_object_counts[camera]
+            ):
+                any_changed = True
+                self.publish(f"{camera}/{label}/active", new_active_count)
+                self.camera_active_object_counts[camera][label] = new_active_count
+
+        if any_changed:
+            self.publish(f"{camera}/all", sum(list(all_objects.values())))
+            self.publish(f"{camera}/all/active", sum(list(active_objects.values())))
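The diffing in this new file reduces to Counter comparisons; a standalone sketch of the counting rules, with a payload shape inferred from update_activity() above:

from collections import Counter

# hypothetical snapshot of tracked objects on one camera; field names
# follow the dicts consumed by compare_camera_activity()
objects = [
    {"label": "person", "stationary": False, "current_zones": ["yard"]},
    {"label": "car-verified", "stationary": True, "current_zones": []},
]

# same counting rules as the manager: "-verified" suffixes fold into the
# base label, and "active" means the object is not stationary
all_objects = Counter(obj["label"].replace("-verified", "") for obj in objects)
active_objects = Counter(
    obj["label"].replace("-verified", "") for obj in objects if not obj["stationary"]
)

print(all_objects)     # Counter({'person': 1, 'car': 1})
print(active_objects)  # Counter({'person': 1})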