Merge branch 'fastapi-poc' into fastapi-poc-media-endpoints

This commit is contained in:
Rui Alves 2024-09-21 14:10:54 +01:00
commit 06055a2483
86 changed files with 3438 additions and 1204 deletions

View File

@@ -1,168 +1,303 @@
rtmp
edgetpu
labelmap
rockchip
jetson
rocm
vaapi
CUDA
hwaccel
RTSP
Hikvision
Dahua
Amcrest
Reolink
Loryta
Beelink
Celeron
vaapi
blakeblackshear
workdir
onvif
autotracking
openvino
tflite
deepstack
codeproject
udev
tailscale
restream
restreaming
webrtc
ssdlite
mobilenet
mosquitto
datasheet
Jellyfin
Radeon
libva
Ubiquiti
Unifi
Tapo
Annke
autotracker
autotracked
variations
ONVIF
traefik
devcontainer
rootfs
ffprobe
autotrack
logpipe
imread
imwrite
imencode
imutils
thresholded
timelapse
ultrafast
sleeptime
radeontop
vainfo
tmpfs
homography
websockets
LIBAVFORMAT
NTSC
onnxruntime
fourcc
radeonsi
paho
imagestream
jsonify
cgroups
sysconf
memlimit
gpuload
nvml
setproctitle
psutil
Kalman
frontdoor
namedtuples
zeep
fflags
probesize
wallclock
rknn
socs
pydantic
shms
imdecode
colormap
webui
mse
jsmpeg
unreviewed
Chromecast
Swipeable
flac
scroller
cmdline
toggleable
bottombar
opencv
apexcharts
buildx
mqtt
rawvideo
defragment
Norfair
subclassing
yolo
tensorrt
blackshear
stylelint
HACS
homeassistant
hass
castable
mobiledet
framebuffer
mjpeg
substream
codeowner
noninteractive
restreamed
mountpoint
fstype
OWASP
iotop
letsencrypt
fullchain
lsusb
iostat
usermod
balena
passwordless
debconf
dpkg
poweroff
surveillance
qnap
homekit
colorspace
quantisation
skylake
Cuvid
foscam
onnx
numpy
protobuf
aarch
absdiff
airockchip
Alloc
Amcrest
amdgpu
chipset
referer
mpegts
webp
analyzeduration
Annke
apexcharts
arange
argmax
argmin
argpartition
ascontiguousarray
authelia
authentik
unichip
rebranded
udevadm
autodetected
automations
unraid
hideable
autotrack
autotracked
autotracker
autotracking
balena
Beelink
BGRA
BHWC
blackshear
blakeblackshear
bottombar
buildx
castable
cdist
Celeron
cgroups
chipset
chromadb
Chromecast
cmdline
codeowner
CODEOWNERS
codeproject
colormap
colorspace
comms
ctypeslib
CUDA
Cuvid
Dahua
datasheet
debconf
deci
deepstack
defragment
devcontainer
DEVICEMAP
discardcorrupt
dpkg
dsize
dtype
ECONNRESET
edgetpu
faststart
fflags
ffprobe
fillna
flac
foscam
fourcc
framebuffer
fregate
frégate
fromarray
frombuffer
frontdoor
fstype
fullchain
fullscreen
genai
generativeai
genpts
getpid
gpuload
HACS
Hailo
hass
hconcat
healthcheck
keepalive
hideable
Hikvision
homeassistant
homekit
homography
hsize
hstack
httpx
hwaccel
hwdownload
hwmap
hwupload
iloc
imagestream
imdecode
imencode
imread
imutils
imwrite
interp
iostat
iotop
itemsize
Jellyfin
jetson
jetsons
joserfc
jsmpeg
jsonify
Kalman
keepalive
keepdims
labelmap
letsencrypt
levelname
LIBAVFORMAT
libedgetpu
libnvinfer
libva
libwebp
libx
libyolo
linalg
localzone
logpipe
Loryta
lstsq
lsusb
markupsafe
maxsplit
MEMHOSTALLOC
memlimit
meshgrid
metadatas
migraphx
minilm
mjpeg
mkfifo
mobiledet
mobilenet
modelpath
mosquitto
mountpoint
movflags
mpegts
mqtt
mse
msenc
namedtuples
nbytes
nchw
ndarray
ndimage
nethogs
newaxis
nhwc
NOBLOCK
nobuffer
nokey
NONBLOCK
noninteractive
noprint
Norfair
nptype
NTSC
numpy
nvenc
nvhost
nvml
nvmpi
ollama
onnx
onnxruntime
onvif
ONVIF
openai
opencv
openvino
OWASP
paho
passwordless
popleft
posthog
postprocess
poweroff
preexec
probesize
protobuf
psutil
pubkey
putenv
pycache
pydantic
pyobj
pysqlite
pytz
pywebpush
qnap
quantisation
Radeon
radeonsi
radeontop
rawvideo
rcond
RDONLY
rebranded
referer
Reolink
restream
restreamed
restreaming
rkmpp
rknn
rkrga
rockchip
rocm
rocminfo
rootfs
rtmp
RTSP
ruamel
scroller
setproctitle
setpts
shms
SIGUSR
skylake
sleeptime
SNDMORE
socs
sqliteq
ssdlite
statm
stimeout
stylelint
subclassing
substream
superfast
surveillance
svscan
Swipeable
sysconf
tailscale
Tapo
tensorrt
tflite
thresholded
timelapse
tmpfs
tobytes
toggleable
traefik
tzlocal
Ubiquiti
udev
udevadm
ultrafast
unichip
unidecode
Unifi
unixepoch
unraid
unreviewed
userdata
usermod
vaapi
vainfo
variations
vconcat
vitb
vstream
vsync
wallclock
webp
webpush
webrtc
websockets
webui
werkzeug
workdir
WRONLY
wsgirefserver
wsgiutils
wsize
xaddr
xmaxs
xmins
XPUB
XSUB
ymaxs
ymins
yolo
yolonas
yolox
zeep
zerolatency

View File

@@ -179,57 +179,18 @@ jobs:
h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
#- name: AMD/ROCm general build
# env:
# AMDGPU: gfx
# HSA_OVERRIDE: 0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
# *.cache-from=type=gha
#- name: AMD/ROCm gfx900
# env:
# AMDGPU: gfx900
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 9.0.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
# *.cache-from=type=gha
#- name: AMD/ROCm gfx1030
# env:
# AMDGPU: gfx1030
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 10.3.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
# *.cache-from=type=gha
#- name: AMD/ROCm gfx1100
# env:
# AMDGPU: gfx1100
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 11.0.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
# *.cache-from=type=gha
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v3
with:
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-from=type=gha
# The majority of users running arm64 are rpi users, so the rpi
# build should be the primary arm64 image
assemble_default_build:

.vscode/launch.json
View File

@@ -3,10 +3,9 @@
"configurations": [
{
"name": "Python: Launch Frigate",
"type": "python",
"type": "debugpy",
"request": "launch",
"module": "frigate",
"justMyCode": true
"module": "frigate"
}
]
}

View File

@@ -4,8 +4,6 @@ COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.15.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
CURRENT_UID := $(shell id -u)
CURRENT_GID := $(shell id -g)
BOARDS= #Initialized empty
include docker/*/*.mk
@@ -18,25 +16,38 @@ version:
echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
local: version
docker buildx build --target=frigate --tag frigate:latest --load --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag frigate:latest \
--load
amd64:
docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
--platform linux/amd64
arm64:
docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
--platform linux/arm64
build: version amd64 arm64
docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
--platform linux/arm64/v8,linux/amd64
push: push-boards
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) \
--platform linux/arm64/v8,linux/amd64 \
--push
run: local
docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest
run_tests: local
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m unittest
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m mypy --config-file frigate/mypy.ini frigate
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
python3 -u -m unittest
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
python3 -u -m mypy --config-file frigate/mypy.ini frigate
.PHONY: run_tests

View File

@@ -7,7 +7,8 @@
"*.db",
"node_modules",
"__pycache__",
"dist"
"dist",
"/audio-labelmap.txt"
],
"language": "en",
"dictionaryDefinitions": [

View File

@@ -1,10 +1,15 @@
BOARDS += h8l
local-h8l: version
docker buildx bake --load --file=docker/hailo8l/h8l.hcl --set h8l.tags=frigate:latest-h8l h8l
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
--set h8l.tags=frigate:latest-h8l \
--load
build-h8l: version
docker buildx bake --file=docker/hailo8l/h8l.hcl --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l h8l
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l
push-h8l: build-h8l
docker buildx bake --push --file=docker/hailo8l/h8l.hcl --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l h8l
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l \
--push

View File

@@ -170,6 +170,9 @@ RUN /build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
COPY docker/main/requirements-wheels-post.txt /requirements-wheels-post.txt
RUN pip3 wheel --no-deps --wheel-dir=/wheels-post -r /requirements-wheels-post.txt
# Collect deps in a single layer
FROM scratch AS deps-rootfs
@@ -212,6 +215,14 @@ RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
# We have to uninstall this dependency specifically
# as it will break onnxruntime-openvino
RUN pip3 uninstall -y onnxruntime
RUN --mount=type=bind,from=wheels,source=/wheels-post,target=/deps/wheels \
python3 -m pip install --upgrade pip && \
pip3 install -U /deps/wheels/*.whl
COPY --from=deps-rootfs / /
RUN ldconfig

View File

@@ -44,7 +44,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-13-12-57/ffmpeg-n7.0.2-17-gf705bc5b73-linux64-gpl-7.0.tar.xz"
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
fi
@@ -56,19 +56,18 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-13-12-57/ffmpeg-n7.0.2-17-gf705bc5b73-linuxarm64-gpl-7.0.tar.xz"
wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
fi
# arch specific packages
if [[ "${TARGETARCH}" == "amd64" ]]; then
# use debian bookworm for hwaccel packages
# use debian bookworm for amd / intel-i965 driver packages
echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd intel-media-va-driver-non-free i965-va-driver \
libmfx-gen1.2 libmfx1 onevpl-tools intel-gpu-tools \
i965-va-driver intel-gpu-tools onevpl-tools \
libva-drm2 \
mesa-va-drivers radeontop
@@ -77,6 +76,17 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
i965-va-driver-shaders
rm -f /etc/apt/sources.list.d/debian-bookworm.list
# use intel apt packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \
libmfx1 libmfxgen1 libvpl2
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then

View File

@@ -0,0 +1,3 @@
# ONNX
onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
onnxruntime == 1.19.* ; platform_machine == 'aarch64'

View File

@@ -28,13 +28,11 @@ norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
# OpenVino & ONNX
openvino == 2024.1.*
onnxruntime-openvino == 1.18.* ; platform_machine == 'x86_64'
onnxruntime == 1.18.* ; platform_machine == 'aarch64'
# OpenVino (ONNX installed in wheels-post)
openvino == 2024.3.*
# Embeddings
onnx_clip == 4.0.*
chromadb == 0.5.0
onnx_clip == 4.0.*
# Generative AI
google-generativeai == 0.6.*
ollama == 0.2.*

View File

@@ -2,16 +2,19 @@
import json
import os
import shutil
import sys
from pathlib import Path
import yaml
sys.path.insert(0, "/opt/frigate")
from frigate.const import BIRDSEYE_PIPE # noqa: E402
from frigate.ffmpeg_presets import ( # noqa: E402
parse_preset_hardware_acceleration_encode,
from frigate.const import (
BIRDSEYE_PIPE,
DEFAULT_FFMPEG_VERSION,
INCLUDED_FFMPEG_VERSIONS,
)
from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
sys.path.remove("/opt/frigate")
@@ -108,14 +111,12 @@ else:
# ensure ffmpeg path is set correctly
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
if int(os.getenv("", "59") or "59") >= 59:
ffmpeg_path = "/usr/lib/ffmpeg/7.0/bin/ffmpeg"
if shutil.which("ffmpeg") is None:
ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
else:
ffmpeg_path = "ffmpeg"
elif path == "7.0":
ffmpeg_path = "/usr/lib/ffmpeg/7.0/bin/ffmpeg"
elif path == "5.0":
ffmpeg_path = "/usr/lib/ffmpeg/5.0/bin/ffmpeg"
elif path in INCLUDED_FFMPEG_VERSIONS:
ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
else:
ffmpeg_path = f"{path}/bin/ffmpeg"

View File

@@ -22,5 +22,6 @@ ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/btbn-ffmpeg/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/btbn-ffmpeg/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

View File

@@ -1,10 +1,15 @@
BOARDS += rk
local-rk: version
docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk
docker buildx bake --file=docker/rockchip/rk.hcl rk \
--set rk.tags=frigate:latest-rk \
--load
build-rk: version
docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
docker buildx bake --file=docker/rockchip/rk.hcl rk \
--set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk
push-rk: build-rk
docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
docker buildx bake --file=docker/rockchip/rk.hcl rk \
--set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk \
--push

View File

@@ -23,11 +23,11 @@ COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/
RUN apt-get update
RUN apt-get -y install --no-install-recommends migraphx
RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer
RUN apt-get -y install --no-install-recommends migraphx-dev
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
@@ -69,7 +69,11 @@ RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rocm/rootfs/ /
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN python3 -m pip install --upgrade pip \
&& pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
#######################################################################
FROM scratch AS rocm-dist
@@ -101,6 +105,3 @@ ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION
#######################################################################
FROM rocm-prelim-hsa-override$HSA_OVERRIDE as rocm-deps
# Request yolov8 download at startup
ENV DOWNLOAD_YOLOV8=1

View File

@@ -0,0 +1 @@
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl

View File

@@ -4,14 +4,50 @@ BOARDS += rocm
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0
local-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \
--load \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm \
--load
build-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm
push-rocm: build-rocm
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
--push \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \
--push

View File

@@ -1,20 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Compile YoloV8 ONNX files into ROCm MIGraphX files
OVERRIDE=$(cd /opt/frigate && python3 -c 'import frigate.detectors.plugins.rocm as rocm; print(rocm.auto_override_gfx_version())')
if ! test -z "$OVERRIDE"; then
echo "Using HSA_OVERRIDE_GFX_VERSION=${OVERRIDE}"
export HSA_OVERRIDE_GFX_VERSION=$OVERRIDE
fi
for onnx in /config/model_cache/yolov8/*.onnx
do
mxr="${onnx%.onnx}.mxr"
if ! test -f $mxr; then
echo "processing $onnx into $mxr"
/opt/rocm/bin/migraphx-driver compile $onnx --optimize --gpu --enable-offload-copy --binary -o $mxr
fi
done

View File

@@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/compile-rocm-models/run

View File

@@ -1,10 +1,15 @@
BOARDS += rpi
local-rpi: version
docker buildx bake --load --file=docker/rpi/rpi.hcl --set rpi.tags=frigate:latest-rpi rpi
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
--set rpi.tags=frigate:latest-rpi \
--load
build-rpi: version
docker buildx bake --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
--set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi
push-rpi: build-rpi
docker buildx bake --push --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
--set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi \
--push

View File

@@ -12,12 +12,28 @@ ARG TARGETARCH
COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
# Build CuDNN
FROM wget AS cudnn-deps
ARG COMPUTE_LEVEL
RUN apt-get update \
&& apt-get install -y git build-essential
RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
&& dpkg -i cuda-keyring_1.1-1_all.deb \
&& apt-get update \
&& apt-get -y install cuda-toolkit \
&& rm -rf /var/lib/apt/lists/*
FROM tensorrt-base AS frigate-tensorrt
ENV TRT_VER=8.5.3
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl && \
ldconfig
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
COPY --from=rootfs / /
@@ -26,6 +42,7 @@ FROM devcontainer AS devcontainer-trt
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \

View File

@@ -8,5 +8,7 @@ nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'
onnxruntime-gpu==1.17.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@@ -7,20 +7,35 @@ JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BAS
JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE)
local-trt: version
$(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt \
--load
local-trt-jp4: version
$(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp4 \
--load
local-trt-jp5: version
$(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp5 \
--load
build-trt:
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5
push-trt: build-trt
$(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
$(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
$(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt \
--push
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 \
--push
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 \
--push

View File

@@ -124,13 +124,25 @@ genai:
  model: llava
  prompt: "Describe the {label} in these images from the {camera} security camera."
  object_prompts:
    person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from."
    person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc)."
    car: "Label the primary vehicle in these images with just the name of the company if it is a delivery vehicle, or the color make and model."
```
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.
```yaml
cameras:
  front_door:
    genai:
      prompt: "Describe the {label} in these images from the {camera} security camera at the front door of a house, aimed outward toward the street."
      object_prompts:
        person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from."
        cat: "Describe the cat in these images (color, size, tail). Indicate whether or not the cat is by the flower pots. If the cat is chasing a mouse, make up a name for the mouse."
```
### Experiment with prompts
Providers also has a public facing chat interface for their models. Download a couple different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.
Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.
- OpenAI - [ChatGPT](https://chatgpt.com)
- Gemini - [Google AI Studio](https://aistudio.google.com)

View File

@@ -65,24 +65,33 @@ Or map in all the `/dev/video*` devices.
## Intel-based CPUs
**Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------ | ----------------------------------- |
| gen1 - gen7 | i965 | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-* | |
:::note
The default driver is `iHD`. You may need to change the driver to `i965` by setting the environment variable `LIBVA_DRIVER_NAME=i965` in your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars); a compose sketch follows this note.
See [the Intel docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.
:::
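A minimal docker-compose sketch of that driver override (the service name and image tag are illustrative assumptions, not part of the note above):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable # assumed tag
    environment:
      # force the legacy i965 VA-API driver (gen1 - gen7 CPUs)
      LIBVA_DRIVER_NAME: i965
```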
### Via VAAPI
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs.
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
```yaml
ffmpeg:
  hwaccel_args: preset-vaapi
```
:::note
With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
:::
### Via Quicksync (>=10th Generation only)
If VAAPI does not work for you, you can try QSV if your processor supports it. QSV must be set specifically based on the video encoding of the stream.
### Via Quicksync
#### H.264 streams
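The example under this heading is cut off in this diff; following the `preset-intel-qsv-*` naming used in the table above, an H.264 stream would presumably use a sketch like this (the exact preset name is an assumption here):

```yaml
ffmpeg:
  # assumed name following the preset-intel-qsv-* convention
  hwaccel_args: preset-intel-qsv-h264
```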

View File

@@ -3,6 +3,29 @@ id: object_detectors
title: Object Detectors
---
# Supported Hardware
Frigate supports multiple different detectors that work on different types of hardware:
**Most Hardware**
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
**AMD**
- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.
**Intel**
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.
**Nvidia**
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
# Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, `rknn`, and `hailo8l`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
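As a minimal illustration (a sketch, not taken from this diff; the detector name `coral` is arbitrary and the device string depends on how the Coral is attached):

```yaml
detectors:
  coral: # arbitrary name for this detector
    type: edgetpu
    device: usb # assumes a USB-attached Coral
```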
@@ -122,6 +145,22 @@ The OpenVINO device to be used is specified using the `"device"` attribute accor
OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs despite having no official support for it. A supported Intel platform is required to use the `GPU` device with OpenVINO. For detailed system requirements, see [OpenVINO System Requirements](https://docs.openvino.ai/2024/about-openvino/release-notes-openvino/system-requirements.html)
:::tip
When using many cameras, one detector may not be enough to keep up. Multiple detectors can be defined, assuming GPU resources are available. An example configuration would be:
```yaml
detectors:
  ov_0:
    type: openvino
    device: GPU
  ov_1:
    type: openvino
    device: GPU
```
:::
### Supported Models
#### SSDLite MobileNet v2
@@ -278,6 +317,173 @@ model:
height: 320
```
## AMD/ROCm GPU detector
### Setup
The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
### Docker settings for GPU access
ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. When Docker or Frigate is not run as root, the `video` (and possibly `render` and `ssl/_ssl`) groups should also be added.
When running docker directly the following flags should be added for device access:
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
...
```
When using docker compose:
```yaml
services:
  frigate:
    ...
    devices:
      - /dev/dri
      - /dev/kfd
```
For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
### Docker settings for overriding the GPU chipset
Your GPU might work just fine without any special configuration, but in many cases manual settings are needed. The AMD/ROCm software stack comes with a limited set of GPU drivers, and for newer or missing models you will have to override the chipset version to an older/generic version to get things working.
AMD/ROCm also does not "officially" support integrated GPUs. It still works with most of them just fine, but this requires special settings: you have to configure the `HSA_OVERRIDE_GFX_VERSION` environment variable. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples.
For the rocm frigate build there is some automatic detection:
- gfx90c -> 9.0.0
- gfx1031 -> 10.3.0
- gfx1103 -> 11.0.0
If you have something else, you might need to override `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`; then you should configure it from the command line as:
```bash
$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
...
```
When using docker compose:
```yaml
services:
  frigate:
    ...
    environment:
      HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```
Figuring out what version you need can be complicated, as you can't tell the chipset name and driver from the AMD brand name.
- First, make sure the ROCm environment is running properly by running `/opt/rocm/bin/rocminfo` in the Frigate container; it should list both the CPU and the GPU with their properties.
- Find the chipset version you have (gfxNNN) in the output of `rocminfo` (see below).
- Use a search engine to query what `HSA_OVERRIDE_GFX_VERSION` you need for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION").
- Override `HSA_OVERRIDE_GFX_VERSION` with the relevant value.
- If things are not working, check the Frigate docker logs.
#### Figuring out if AMD/ROCm is working and found your GPU
```bash
$ docker exec -it frigate /opt/rocm/bin/rocminfo
```
#### Figuring out your AMD GPU chipset version:
We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result:
```bash
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```
### Supported Models
There is no default model provided; the following formats are supported:
#### YOLO-NAS
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
:::warning
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
:::
The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
  onnx:
    type: rocm

model:
  model_type: yolonas
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## ONNX
ONNX is an open format for building machine learning models. Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup, Frigate will automatically try to use a GPU if one is available.
:::tip
When using many cameras, one detector may not be enough to keep up. Multiple detectors can be defined, assuming GPU resources are available. An example configuration would be:
```yaml
detectors:
  onnx_0:
    type: onnx
  onnx_1:
    type: onnx
```
:::
### Supported Models
There is no default model provided; the following formats are supported:
#### YOLO-NAS
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
:::warning
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
:::
The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
  onnx:
    type: onnx

model:
  model_type: yolonas
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## Deepstack / CodeProject.AI Server Detector
The Deepstack / CodeProject.AI Server detector for Frigate allows you to integrate Deepstack and CodeProject.AI object detection capabilities into Frigate. CodeProject.AI and DeepStack are open-source AI platforms that can be run on various devices such as the Raspberry Pi, Nvidia Jetson, and other compatible hardware. It is important to note that the integration is performed over the network, so the inference times may not be as fast as native Frigate detectors, but it still provides an efficient and reliable solution for object detection and tracking.
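A minimal configuration sketch for this detector (the hostname, port, and timeout values below are assumptions to adapt to your deployment):

```yaml
detectors:
  deepstack:
    type: deepstack
    api_url: http://deepstack-host:80/v1/vision/detection # assumed host/port
    api_timeout: 0.1 # request timeout in seconds, assumed value
```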
@@ -389,7 +595,7 @@ $ cat /sys/kernel/debug/rknpu/load
## Hailo-8l
This detector is available if you are using the Raspberry Pi 5 with Hailo-8L AI Kit. This has not been tested using the Hailo-8L with other hardware.
This detector is available for use with the Hailo-8 AI Acceleration Module.
### Configuration
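The configuration example is cut off in this diff; a minimal sketch, assuming the `hailo8l` detector type listed earlier and a PCIe-attached module:

```yaml
detectors:
  hailo8l:
    type: hailo8l
    device: PCIe # assumed device identifier
```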

View File

@@ -504,7 +504,7 @@ semantic_search:
# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
# the camera level (enabled: False) to enhance privacy for indoor cameras.
genai:
  # Optional: Enable Google Gemini description generation (default: shown below)
  # Optional: Enable AI description generation (default: shown below)
  enabled: False
  # Required if enabled: Provider must be one of ollama, gemini, or openai
  provider: ollama
@@ -712,6 +712,18 @@ cameras:
    # By default the cameras are sorted alphabetically.
    order: 0

    # Optional: Configuration for AI generated tracked object descriptions
    genai:
      # Optional: Enable AI description generation (default: shown below)
      enabled: False
      # Optional: The default prompt for generating descriptions. Can use replacement
      # variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
      prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
      # Optional: Object specific prompts to customize description results
      # Format: {label}: {prompt}
      object_prompts:
        person: "My special person prompt."

# Optional
ui:
  # Optional: Set a timezone to use in the UI (default: use browser local time)

View File

@@ -87,6 +87,10 @@ Inference speeds will vary greatly depending on the GPU and the model used.
| Quadro P400 2GB | 20 - 25 ms |
| Quadro P2000 | ~ 12 ms |
#### AMD GPUs
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs.
### Community Supported:
#### Nvidia Jetson

docs/package-lock.json

File diff suppressed because it is too large

View File

@@ -14,15 +14,15 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.4.0",
"@docusaurus/preset-classic": "^3.4.0",
"@docusaurus/theme-mermaid": "^3.4.0",
"@docusaurus/core": "^3.5.2",
"@docusaurus/preset-classic": "^3.5.2",
"@docusaurus/theme-mermaid": "^3.5.2",
"@mdx-js/react": "^3.0.0",
"clsx": "^2.0.0",
"prism-react-renderer": "^2.1.0",
"prism-react-renderer": "^2.4.0",
"raw-loader": "^4.0.2",
"react": "^18.2.0",
"react-dom": "^18.2.0"
"react": "^18.3.1",
"react-dom": "^18.3.1"
},
"browserslist": {
"production": [
@@ -39,7 +39,7 @@
"devDependencies": {
"@docusaurus/module-type-aliases": "^3.4.0",
"@docusaurus/types": "^3.4.0",
"@types/react": "^18.2.79"
"@types/react": "^18.3.7"
},
"engines": {
"node": ">=18.0"

View File

@@ -1,17 +1,28 @@
import faulthandler
import logging
import threading
from flask import cli
from frigate.app import FrigateApp
faulthandler.enable()
threading.current_thread().name = "frigate"
def main() -> None:
faulthandler.enable()
# Clear all existing handlers.
logging.basicConfig(
level=logging.INFO,
handlers=[],
force=True,
)
threading.current_thread().name = "frigate"
cli.show_server_banner = lambda *x: None
# Run the main application.
FrigateApp().start()
cli.show_server_banner = lambda *x: None
if __name__ == "__main__":
frigate_app = FrigateApp()
frigate_app.start()
main()

View File

@@ -353,7 +353,10 @@ def events_search():
after = request.args.get("after", type=float)
before = request.args.get("before", type=float)
if not query:
# for similarity search
event_id = request.args.get("event_id", type=str)
if not query and not event_id:
return make_response(
jsonify(
{
@@ -432,7 +435,7 @@ def events_search():
if search_type == "similarity":
# Grab the ids of events that match the thumbnail image embeddings
try:
search_event: Event = Event.get(Event.id == query)
search_event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
return make_response(
jsonify(

View File

@@ -149,9 +149,9 @@ def export_delete(id: str):
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
file_list = process.open_files()
if file_list:
for nt in file_list:
if nt.path.startswith(EXPORT_DIR):
files_in_use.append(nt.path.split("/")[-1])
except psutil.Error:

View File

@@ -46,7 +46,7 @@ from frigate.events.audio import listen_to_audio
from frigate.events.cleanup import EventCleanup
from frigate.events.external import ExternalEventProcessor
from frigate.events.maintainer import EventProcessor
from frigate.log import log_process, root_configurer
from frigate.log import log_thread
from frigate.models import (
Event,
Export,
@@ -116,15 +116,6 @@ class FrigateApp:
else:
logger.debug(f"Skipping directory: {d}")
def init_logger(self) -> None:
self.log_process = mp.Process(
target=log_process, args=(self.log_queue,), name="log_process"
)
self.log_process.daemon = True
self.log_process.start()
self.processes["logger"] = self.log_process.pid or 0
root_configurer(self.log_queue)
def init_config(self) -> None:
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
@@ -679,6 +670,7 @@ class FrigateApp:
logger.info("********************************************************")
logger.info("********************************************************")
@log_thread()
def start(self) -> None:
parser = argparse.ArgumentParser(
prog="Frigate",
@@ -687,7 +679,6 @@ class FrigateApp:
parser.add_argument("--validate-config", action="store_true")
args = parser.parse_args()
self.init_logger()
logger.info(f"Starting Frigate ({VERSION})")
try:
@@ -714,13 +705,11 @@ class FrigateApp:
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")
self.log_process.terminate()
sys.exit(1)
if args.validate_config:
print("*************************************************************")
print("*** Your config file is valid. ***")
print("*************************************************************")
self.log_process.terminate()
sys.exit(0)
self.set_environment_vars()
self.set_log_levels()
@@ -737,7 +726,6 @@ class FrigateApp:
self.init_dispatcher()
except Exception as e:
print(e)
self.log_process.terminate()
sys.exit(1)
self.start_detectors()
self.start_video_output_processor()
@@ -866,7 +854,4 @@ class FrigateApp:
shm.close()
shm.unlink()
self.log_process.terminate()
self.log_process.join()
os._exit(os.EX_OK)

View File

@@ -106,10 +106,10 @@ class WebPushClient(Communicator): # type: ignore[misc]
def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
"""Wrapper for publishing when client is in valid state."""
# check for updated notification config
_, updated_notif_config = self.config_subscriber.check_for_update()
_, updated_notification_config = self.config_subscriber.check_for_update()
if updated_notif_config:
self.config.notifications = updated_notif_config
if updated_notification_config:
self.config.notifications = updated_notification_config
if not self.config.notifications.enabled:
return

View File

@@ -3,6 +3,7 @@ from __future__ import annotations
import json
import logging
import os
import shutil
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
@@ -25,7 +26,9 @@ from frigate.const import (
CACHE_DIR,
CACHE_SEGMENT_FORMAT,
DEFAULT_DB_PATH,
DEFAULT_FFMPEG_VERSION,
FREQUENCY_STATS_POINTS,
INCLUDED_FFMPEG_VERSIONS,
MAX_PRE_CAPTURE,
REGEX_CAMERA_NAME,
YAML_EXT,
@@ -762,8 +765,14 @@ class GenAIConfig(FrigateBaseModel):
object_prompts: Dict[str, str] = Field(default={}, title="Object specific prompts.")
class GenAICameraConfig(FrigateBaseModel):
# uses BaseModel because some global attributes are not available at the camera level
class GenAICameraConfig(BaseModel):
enabled: bool = Field(default=False, title="Enable GenAI for camera.")
prompt: str = Field(
default="Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.",
title="Default caption prompt.",
)
object_prompts: Dict[str, str] = Field(default={}, title="Object specific prompts.")
class AudioConfig(FrigateBaseModel):
@@ -888,28 +897,24 @@ class FfmpegConfig(FrigateBaseModel):
@property
def ffmpeg_path(self) -> str:
if self.path == "default":
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59:
return "/usr/lib/ffmpeg/7.0/bin/ffmpeg"
if shutil.which("ffmpeg") is None:
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
else:
return "ffmpeg"
elif self.path == "7.0":
return "/usr/lib/ffmpeg/7.0/bin/ffmpeg"
elif self.path == "5.0":
return "/usr/lib/ffmpeg/5.0/bin/ffmpeg"
elif self.path in INCLUDED_FFMPEG_VERSIONS:
return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
else:
return f"{self.path}/bin/ffmpeg"
@property
def ffprobe_path(self) -> str:
if self.path == "default":
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59:
return "/usr/lib/ffmpeg/7.0/bin/ffprobe"
if shutil.which("ffprobe") is None:
return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
else:
return "ffprobe"
elif self.path == "7.0":
return "/usr/lib/ffmpeg/7.0/bin/ffprobe"
elif self.path == "5.0":
return "/usr/lib/ffmpeg/5.0/bin/ffprobe"
elif self.path in INCLUDED_FFMPEG_VERSIONS:
return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
else:
return f"{self.path}/bin/ffprobe"
@@ -1187,7 +1192,7 @@ class CameraConfig(FrigateBaseModel):
+ ffmpeg_output_args
)
# if there arent any outputs enabled for this input
# if there aren't any outputs enabled for this input
if len(ffmpeg_output_args) == 0:
return None
@@ -1519,7 +1524,7 @@ class FrigateConfig(FrigateBaseModel):
"live": ...,
"objects": ...,
"review": ...,
"genai": {"enabled"},
"genai": ...,
"motion": ...,
"detect": ...,
"ffmpeg": ...,

View File

@@ -12,7 +12,7 @@ FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"
# Attribute & Object Consts
# Attribute & Object constants
ATTRIBUTE_LABEL_MAP = {
"person": ["face", "amazon"],
@@ -31,7 +31,7 @@ LABEL_NMS_MAP = {
}
LABEL_NMS_DEFAULT = 0.4
# Audio Consts
# Audio constants
AUDIO_DURATION = 0.975
AUDIO_FORMAT = "s16le"
@@ -39,17 +39,19 @@ AUDIO_MAX_BIT_RANGE = 32768.0
AUDIO_SAMPLE_RATE = 16000
AUDIO_MIN_CONFIDENCE = 0.5
# DB Consts
# DB constants
MAX_WAL_SIZE = 10 # MB
# Ffmpeg Presets
# Ffmpeg constants
DEFAULT_FFMPEG_VERSION = "7.0"
INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
# Regex Consts
# Regex constants
REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
REGEX_RTSP_CAMERA_USER_PASS = r":\/\/[a-zA-Z0-9_-]+:[\S]+@"

View File

@@ -24,7 +24,6 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess # Assuming this function is available
# Set up logging
logger = logging.getLogger(__name__)
@@ -83,11 +82,11 @@ class HailoDetector(DetectionApi):
self.network_group_params = self.network_group.create_params()
# Create input and output virtual stream parameters
self.input_vstreams_params = InputVStreamParams.make(
self.input_vstream_params = InputVStreamParams.make(
self.network_group,
format_type=self.hef.get_input_vstream_infos()[0].format.type,
)
self.output_vstreams_params = OutputVStreamParams.make(
self.output_vstream_params = OutputVStreamParams.make(
self.network_group, format_type=getattr(FormatType, output_type)
)
@@ -146,24 +145,16 @@ class HailoDetector(DetectionApi):
f"[detect_raw] Converted tensor_input to numpy array: shape {tensor_input.shape}"
)
# Preprocess the tensor input using Frigate's preprocess function
processed_tensor = preprocess(
tensor_input, (1, self.h8l_model_height, self.h8l_model_width, 3), np.uint8
)
input_data = tensor_input
logger.debug(
f"[detect_raw] Tensor data and shape after preprocessing: {processed_tensor} {processed_tensor.shape}"
)
input_data = processed_tensor
logger.debug(
f"[detect_raw] Input data for inference shape: {processed_tensor.shape}, dtype: {processed_tensor.dtype}"
f"[detect_raw] Input data for inference shape: {tensor_input.shape}, dtype: {tensor_input.dtype}"
)
try:
with InferVStreams(
self.network_group,
self.input_vstreams_params,
self.output_vstreams_params,
self.input_vstream_params,
self.output_vstream_params,
) as infer_pipeline:
input_dict = {}
if isinstance(input_data, dict):

View File

@@ -1,11 +1,15 @@
import logging
import os
import numpy as np
from pydantic import Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
)
logger = logging.getLogger(__name__)
@@ -14,6 +18,7 @@ DETECTOR_KEY = "onnx"
class ONNXDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
device: str = Field(default="AUTO", title="Device Type")
class ONNXDetector(DetectionApi):
@@ -21,7 +26,7 @@ class ONNXDetector(DetectionApi):
def __init__(self, detector_config: ONNXDetectorConfig):
try:
import onnxruntime
import onnxruntime as ort
logger.info("ONNX: loaded onnxruntime module")
except ModuleNotFoundError:
@@ -32,16 +37,79 @@ class ONNXDetector(DetectionApi):
path = detector_config.model.path
logger.info(f"ONNX: loading {detector_config.model.path}")
self.model = onnxruntime.InferenceSession(path)
providers = (
["CPUExecutionProvider"]
if detector_config.device == "CPU"
else ort.get_available_providers()
)
options = []
for provider in providers:
if provider == "TensorrtExecutionProvider":
os.makedirs(
"/config/model_cache/tensorrt/ort/trt-engines", exist_ok=True
)
options.append(
{
"trt_timing_cache_enable": True,
"trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
"trt_engine_cache_enable": True,
"trt_dump_ep_context_model": True,
"trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
"trt_ep_context_file_path": "/config/model_cache/tensorrt/ort",
}
)
elif provider == "OpenVINOExecutionProvider":
os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
options.append(
{
"cache_dir": "/config/model_cache/openvino/ort",
"device_type": detector_config.device,
}
)
else:
options.append({})
self.model = ort.InferenceSession(
path, providers=providers, provider_options=options
)
self.h = detector_config.model.height
self.w = detector_config.model.width
self.onnx_model_type = detector_config.model.model_type
self.onnx_model_px = detector_config.model.input_pixel_format
self.onnx_model_shape = detector_config.model.input_tensor
path = detector_config.model.path
logger.info(f"ONNX: {path} loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_inputs()[0].name
model_input_shape = self.model.get_inputs()[0].shape
tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
# ruff: noqa: F841
tensor_output = self.model.run(None, {model_input_name: tensor_input})[0]
tensor_output = self.model.run(None, {model_input_name: tensor_input})
raise Exception(
"No models are currently supported via onnx. See the docs for more info."
)
if self.onnx_model_type == ModelTypeEnum.yolonas:
predictions = tensor_output[0]
detections = np.zeros((20, 6), np.float32)
for i, prediction in enumerate(predictions):
if i == 20:
break
(_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
# when running in GPU mode, empty predictions in the output have class_id of -1
if class_id < 0:
break
detections[i] = [
class_id,
confidence,
y_min / self.h,
x_min / self.w,
y_max / self.h,
x_max / self.w,
]
return detections
else:
raise Exception(
f"{self.onnx_model_type} is currently not supported for rocm. See the docs for more info on supported models."
)
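In onnxruntime, `provider_options` pairs positionally with `providers`, which is why the loop above appends exactly one options dict per provider (an empty dict for providers that need none). A minimal sketch of the same pattern — model path and cache paths are illustrative:

import onnxruntime as ort

providers = ["TensorrtExecutionProvider", "CPUExecutionProvider"]
options = [
    {"trt_engine_cache_enable": True, "trt_engine_cache_path": "/tmp/trt-engines"},
    {},  # CPUExecutionProvider needs no options here
]
# options[i] configures providers[i]
session = ort.InferenceSession(
    "model.onnx", providers=providers, provider_options=options
)
print(session.get_providers())  # providers actually in use, in priority order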

View File

@ -30,12 +30,6 @@ class OvDetector(DetectionApi):
self.h = detector_config.model.height
self.w = detector_config.model.width
if detector_config.device == "AUTO":
logger.warning(
"OpenVINO AUTO device type is not currently supported. Attempting to use GPU instead."
)
detector_config.device = "GPU"
if not os.path.isfile(detector_config.model.path):
logger.error(f"OpenVino model file {detector_config.model.path} not found.")
raise FileNotFoundError
@ -129,10 +123,10 @@ class OvDetector(DetectionApi):
strides = [8, 16, 32]
hsizes = [self.h // stride for stride in strides]
wsizes = [self.w // stride for stride in strides]
hsize_list = [self.h // stride for stride in strides]
wsize_list = [self.w // stride for stride in strides]
for hsize, wsize, stride in zip(hsizes, wsizes, strides):
for hsize, wsize, stride in zip(hsize_list, wsize_list, strides):
xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
@ -216,10 +210,12 @@ class OvDetector(DetectionApi):
conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
# Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
dets = dets[conf_mask]
detections = np.concatenate(
(image_pred[:, :5], class_conf, class_pred), axis=1
)
detections = detections[conf_mask]
ordered = dets[dets[:, 5].argsort()[::-1]][:20]
ordered = detections[detections[:, 5].argsort()[::-1]][:20]
for i, object_detected in enumerate(ordered):
detections[i] = self.process_yolo(

View File

@ -17,7 +17,7 @@ supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}
model_chache_dir = "/config/model_cache/rknn_cache/"
model_cache_dir = "/config/model_cache/rknn_cache/"
class RknnDetectorConfig(BaseDetectorConfig):
@ -110,7 +110,7 @@ class Rknn(DetectionApi):
if model_matched:
model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
model_props["path"] = model_chache_dir + model_props["filename"]
model_props["path"] = model_cache_dir + model_props["filename"]
if not os.path.isfile(model_props["path"]):
self.download_model(model_props["filename"])
@ -125,12 +125,12 @@ class Rknn(DetectionApi):
return model_props
def download_model(self, filename):
if not os.path.isdir(model_chache_dir):
os.mkdir(model_chache_dir)
if not os.path.isdir(model_cache_dir):
os.mkdir(model_cache_dir)
urllib.request.urlretrieve(
f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
model_chache_dir + filename,
model_cache_dir + filename,
)
def check_config(self, config):

View File

@ -9,8 +9,10 @@ from pydantic import Field
from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.util import preprocess
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
)
logger = logging.getLogger(__name__)
@ -74,7 +76,16 @@ class ROCmDetector(DetectionApi):
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
raise
if detector_config.conserve_cpu:
logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
self.h = detector_config.model.height
self.w = detector_config.model.width
self.rocm_model_type = detector_config.model.model_type
self.rocm_model_px = detector_config.model.input_pixel_format
path = detector_config.model.path
mxr_path = os.path.splitext(path)[0] + ".mxr"
if path.endswith(".mxr"):
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
@ -84,6 +95,7 @@ class ROCmDetector(DetectionApi):
self.model = migraphx.load(mxr_path)
else:
logger.info(f"AMD/ROCm: loading model from {path}")
if path.endswith(".onnx"):
self.model = migraphx.parse_onnx(path)
elif (
@ -95,30 +107,51 @@ class ROCmDetector(DetectionApi):
self.model = migraphx.parse_tf(path)
else:
raise Exception(f"AMD/ROCm: unknown model format {path}")
logger.info("AMD/ROCm: compiling the model")
self.model.compile(
migraphx.get_target("gpu"), offload_copy=True, fast_math=True
)
logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
os.makedirs("/config/model_cache/rocm", exist_ok=True)
migraphx.save(self.model, mxr_path)
logger.info("AMD/ROCm: model loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_parameter_names()[0]
model_input_shape = tuple(
self.model.get_parameter_shapes()[model_input_name].lens()
)
tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
detector_result = self.model.run({model_input_name: tensor_input})[0]
addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
# ruff: noqa: F841
tensor_output = np.ctypeslib.as_array(
addr, shape=detector_result.get_shape().lens()
)
raise Exception(
"No models are currently supported for rocm. See the docs for more info."
)
if self.rocm_model_type == ModelTypeEnum.yolonas:
predictions = tensor_output
detections = np.zeros((20, 6), np.float32)
for i, prediction in enumerate(predictions):
if i == 20:
break
(_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
# when running in GPU mode, empty predictions in the output have class_id of -1
if class_id < 0:
break
detections[i] = [
class_id,
confidence,
y_min / self.h,
x_min / self.w,
y_max / self.h,
x_max / self.w,
]
return detections
else:
raise Exception(
f"{self.rocm_model_type} is currently not supported for rocm. See the docs for more info on supported models."
)
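The 20-row YOLO-NAS decode loop above is now duplicated between the ONNX and ROCm detectors; a shared helper could host it. Sketch only — `decode_yolonas` is a hypothetical name, not part of this change:

import numpy as np

def decode_yolonas(predictions, width, height, max_dets=20):
    # each row: (batch, x_min, y_min, x_max, y_max, confidence, class_id)
    detections = np.zeros((max_dets, 6), np.float32)
    for i, prediction in enumerate(predictions):
        if i == max_dets:
            break
        (_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
        if class_id < 0:  # GPU runs pad unused rows with class_id -1
            break
        detections[i] = [
            class_id,
            confidence,
            y_min / height,
            x_min / width,
            y_max / height,
            x_max / width,
        ]
    return detections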

View File

@ -285,14 +285,14 @@ class TensorRtDetector(DetectionApi):
boxes, scores, classes
"""
# filter low-conf detections and concatenate results of all yolo layers
detections = []
detection_list = []
for o in trt_outputs:
dets = o.reshape((-1, 7))
dets = dets[dets[:, 4] * dets[:, 6] >= conf_th]
detections.append(dets)
detections = np.concatenate(detections, axis=0)
detections = o.reshape((-1, 7))
detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
detection_list.append(detections)
detection_list = np.concatenate(detection_list, axis=0)
return detections
return detection_list
def detect_raw(self, tensor_input):
# Input tensor has the shape of the [height, width, 3]

View File

@ -1,36 +0,0 @@
import logging
import cv2
import numpy as np
logger = logging.getLogger(__name__)
def preprocess(tensor_input, model_input_shape, model_input_element_type):
model_input_shape = tuple(model_input_shape)
assert tensor_input.dtype == np.uint8, f"tensor_input.dtype: {tensor_input.dtype}"
if len(tensor_input.shape) == 3:
tensor_input = tensor_input[np.newaxis, :]
if model_input_element_type == np.uint8:
# nothing to do for uint8 model input
assert (
model_input_shape == tensor_input.shape
), f"model_input_shape: {model_input_shape}, tensor_input.shape: {tensor_input.shape}"
return tensor_input
assert (
model_input_element_type == np.float32
), f"model_input_element_type: {model_input_element_type}"
# tensor_input must be nhwc
assert tensor_input.shape[3] == 3, f"tensor_input.shape: {tensor_input.shape}"
if tensor_input.shape[1:3] != model_input_shape[2:4]:
logger.warn(
f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
)
# cv2.dnn.blobFromImage is faster than numpying it
return cv2.dnn.blobFromImage(
tensor_input[0],
1.0 / 255,
(model_input_shape[3], model_input_shape[2]),
None,
swapRB=False,
)

View File

@ -15,7 +15,7 @@ from frigate.models import Event
# Squelch posthog logging
logging.getLogger("chromadb.telemetry.product.posthog").setLevel(logging.CRITICAL)
# Hotsawp the sqlite3 module for Chroma compatibility
# Hot-swap the sqlite3 module for Chroma compatibility
try:
from chromadb import Collection
from chromadb import HttpClient as ChromaClient
@ -85,7 +85,10 @@ class Embeddings:
@property
def description(self) -> Collection:
return self.client.get_or_create_collection(
name="event_description", embedding_function=MiniLMEmbedding()
name="event_description",
embedding_function=MiniLMEmbedding(
preferred_providers=["CPUExecutionProvider"]
),
)
def reindex(self) -> None:

View File

@ -1,9 +1,13 @@
"""CLIP Embeddings for Frigate."""
import errno
import logging
import os
from pathlib import Path
from typing import Tuple, Union
import onnxruntime as ort
import requests
from chromadb import EmbeddingFunction, Embeddings
from chromadb.api.types import (
Documents,
@ -39,10 +43,49 @@ class Clip(OnnxClip):
models = []
for model_file in [IMAGE_MODEL_FILE, TEXT_MODEL_FILE]:
path = os.path.join(MODEL_CACHE_DIR, "clip", model_file)
models.append(OnnxClip._load_model(path, silent))
models.append(Clip._load_model(path, silent))
return models[0], models[1]
@staticmethod
def _load_model(path: str, silent: bool):
providers = ["CPUExecutionProvider"]
try:
if os.path.exists(path):
return ort.InferenceSession(path, providers=providers)
else:
raise FileNotFoundError(
errno.ENOENT,
os.strerror(errno.ENOENT),
path,
)
except Exception:
s3_url = f"https://lakera-clip.s3.eu-west-1.amazonaws.com/{os.path.basename(path)}"
if not silent:
logging.info(
f"The model file ({path}) doesn't exist "
f"or it is invalid. Downloading it from the public S3 "
f"bucket: {s3_url}." # noqa: E501
)
# Download from S3
# Saving to a temporary file first to avoid corrupting the file
temporary_filename = Path(path).with_name(os.path.basename(path) + ".part")
# Create any missing directories in the path
temporary_filename.parent.mkdir(parents=True, exist_ok=True)
with requests.get(s3_url, stream=True) as r:
r.raise_for_status()
with open(temporary_filename, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
f.flush()
# Finally move the temporary file to the correct location
temporary_filename.rename(path)
return ort.InferenceSession(path, providers=providers)
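The download above writes to a `.part` file first and renames it only once the request completes, so an interrupted download can never be mistaken for a valid model. The pattern, condensed (URL and destination hypothetical):

from pathlib import Path

import requests

def fetch_atomically(url: str, dest: str) -> None:
    tmp = Path(dest + ".part")
    tmp.parent.mkdir(parents=True, exist_ok=True)
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(tmp, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    tmp.rename(dest)  # rename is atomic on the same filesystem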
class ClipEmbedding(EmbeddingFunction):
"""Embedding function for CLIP model used in Chroma."""

View File

@ -171,8 +171,11 @@ class EmbeddingMaintainer(threading.Thread):
self, event: Event, thumbnails: list[bytes], metadata: dict
) -> None:
"""Embed the description for an event."""
camera_config = self.config.cameras[event.camera]
description = self.genai_client.generate_description(thumbnails, metadata)
description = self.genai_client.generate_description(
camera_config, thumbnails, metadata
)
if description is None:
logger.debug("Failed to generate description for %s", event.id)

View File

@ -91,10 +91,10 @@ PRESETS_HW_ACCEL_DECODE["preset-nvidia-mjpeg"] = PRESETS_HW_ACCEL_DECODE[
PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
"preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,eq=gamma=1.05",
FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
"preset-jetson-h264": "-r {0}", # scaled in decoder
"preset-jetson-h265": "-r {0}", # scaled in decoder
"preset-rk-h264": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
@ -185,6 +185,15 @@ def parse_preset_hardware_acceleration_scale(
else:
scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])
if (
",hwdownload,format=nv12,eq=gamma=1.05" in scale
and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
):
scale = scale.replace(
",hwdownload,format=nv12,eq=gamma=1.05",
":format=nv12,hwdownload,format=nv12,format=yuv420p",
)
scale = scale.format(fps, width, height).split(" ")
scale.extend(detect_args)
return scale
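As a quick sanity check of the `.format(fps, width, height).split(" ")` step, the updated NVIDIA preset expands like this (toy values):

scale = "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05"
print(scale.format(10, 1280, 720).split(" "))
# ['-r', '10', '-vf', 'fps=10,scale_cuda=w=1280:h=720,hwdownload,format=nv12,eq=gamma=1.05']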

View File

@ -4,7 +4,7 @@ import importlib
import os
from typing import Optional
from frigate.config import GenAIConfig, GenAIProviderEnum
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
PROVIDERS = {}
@ -28,11 +28,14 @@ class GenAIClient:
self.provider = self._init_provider()
def generate_description(
self, thumbnails: list[bytes], metadata: dict[str, any]
self,
camera_config: CameraConfig,
thumbnails: list[bytes],
metadata: dict[str, any],
) -> Optional[str]:
"""Generate a description for the frame."""
prompt = self.genai_config.object_prompts.get(
metadata["label"], self.genai_config.prompt
prompt = camera_config.genai.object_prompts.get(
metadata["label"], camera_config.genai.prompt
).format(**metadata)
return self._send(prompt, thumbnails)
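The lookup now prefers a per-label prompt from the camera's genai config and falls back to the camera's default prompt before interpolating metadata. A toy illustration (all values invented):

object_prompts = {"person": "Describe the person seen at the {camera}."}
default_prompt = "Describe the {label} seen at the {camera}."
metadata = {"label": "car", "camera": "driveway"}

prompt = object_prompts.get(metadata["label"], default_prompt).format(**metadata)
# -> "Describe the car seen at the driveway."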

View File

@ -1,71 +1,71 @@
# adapted from https://medium.com/@jonathonbao/python3-logging-with-multiprocessing-f51f460b8778
import atexit
import logging
import multiprocessing as mp
import os
import queue
import signal
import threading
from collections import deque
from logging import handlers
from multiprocessing import Queue
from types import FrameType
from contextlib import AbstractContextManager, ContextDecorator
from logging.handlers import QueueHandler, QueueListener
from types import TracebackType
from typing import Deque, Optional
from setproctitle import setproctitle
from typing_extensions import Self
from frigate.util.builtin import clean_camera_user_pass
def listener_configurer() -> None:
root = logging.getLogger()
if root.hasHandlers():
root.handlers.clear()
console_handler = logging.StreamHandler()
formatter = logging.Formatter(
"[%(asctime)s] %(name)-30s %(levelname)-8s: %(message)s", "%Y-%m-%d %H:%M:%S"
LOG_HANDLER = logging.StreamHandler()
LOG_HANDLER.setFormatter(
logging.Formatter(
"[%(asctime)s] %(name)-30s %(levelname)-8s: %(message)s",
"%Y-%m-%d %H:%M:%S",
)
console_handler.setFormatter(formatter)
root.addHandler(console_handler)
root.setLevel(logging.INFO)
)
LOG_HANDLER.addFilter(
lambda record: not record.getMessage().startswith(
"You are using a scalar distance function"
)
)
def root_configurer(queue: Queue) -> None:
h = handlers.QueueHandler(queue)
root = logging.getLogger()
class log_thread(AbstractContextManager, ContextDecorator):
def __init__(self, *, handler: logging.Handler = LOG_HANDLER):
super().__init__()
if root.hasHandlers():
root.handlers.clear()
self._handler = handler
root.addHandler(h)
root.setLevel(logging.INFO)
log_queue: mp.Queue = mp.Queue()
self._queue_handler = QueueHandler(log_queue)
self._log_listener = QueueListener(
log_queue, self._handler, respect_handler_level=True
)
def log_process(log_queue: Queue) -> None:
threading.current_thread().name = "logger"
setproctitle("frigate.logger")
listener_configurer()
@property
def handler(self) -> logging.Handler:
return self._handler
stop_event = mp.Event()
def _stop_thread(self) -> None:
self._log_listener.stop()
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
stop_event.set()
def __enter__(self) -> Self:
logging.getLogger().addHandler(self._queue_handler)
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
atexit.register(self._stop_thread)
self._log_listener.start()
while True:
try:
record = log_queue.get(block=True, timeout=1.0)
except queue.Empty:
if stop_event.is_set():
break
continue
if record.msg.startswith("You are using a scalar distance function"):
continue
logger = logging.getLogger(record.name)
logger.handle(record)
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_info: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
logging.getLogger().removeHandler(self._queue_handler)
atexit.unregister(self._stop_thread)
self._stop_thread()
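A usage sketch of the reworked logging setup, assuming the `log_thread` class as defined above — it works both as a context manager and, via `ContextDecorator`, as a decorator:

import logging

with log_thread():
    # records emitted inside the block flow through the queue listener
    logging.getLogger("frigate.test").info("handled by LOG_HANDLER")

@log_thread()
def worker() -> None:
    logging.getLogger("frigate.worker").info("same queue-backed handling")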
# based on https://codereview.stackexchange.com/a/17959

View File

@ -55,13 +55,13 @@ class FrigateMotionDetector(MotionDetector):
# Improve contrast
if self.improve_contrast.value:
minval = np.percentile(resized_frame, 4)
maxval = np.percentile(resized_frame, 96)
min_value = np.percentile(resized_frame, 4)
max_value = np.percentile(resized_frame, 96)
# don't adjust if the image is a single color
if minval < maxval:
resized_frame = np.clip(resized_frame, minval, maxval)
if min_value < max_value:
resized_frame = np.clip(resized_frame, min_value, max_value)
resized_frame = (
((resized_frame - minval) / (maxval - minval)) * 255
((resized_frame - min_value) / (max_value - min_value)) * 255
).astype(np.uint8)
# mask frame
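For intuition, the renamed block above clips the frame to its 4th/96th percentile values and stretches that band to the full 0–255 range, boosting contrast without letting outlier pixels dominate. A toy example:

import numpy as np

frame = np.array([[10, 50], [90, 130]], dtype=np.uint8)
min_value = np.percentile(frame, 4)   # lower cut
max_value = np.percentile(frame, 96)  # upper cut
stretched = (
    (np.clip(frame, min_value, max_value) - min_value) / (max_value - min_value) * 255
).astype(np.uint8)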
@ -100,13 +100,13 @@ class FrigateMotionDetector(MotionDetector):
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh_dilated = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(
contours = cv2.findContours(
thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
cnts = imutils.grab_contours(cnts)
contours = imutils.grab_contours(contours)
# loop over the contours
for c in cnts:
for c in contours:
# if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c)
if contour_area > self.contour_area.value:
@ -124,7 +124,7 @@ class FrigateMotionDetector(MotionDetector):
thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
# print("--------")
# print(self.frame_counter)
for c in cnts:
for c in contours:
contour_area = cv2.contourArea(c)
if contour_area > self.contour_area.value:
x, y, w, h = cv2.boundingRect(c)

View File

@ -79,12 +79,15 @@ class ImprovedMotionDetector(MotionDetector):
# Improve contrast
if self.config.improve_contrast:
# TODO: track a moving average of min/max to avoid sudden contrast changes
minval = np.percentile(resized_frame, 4).astype(np.uint8)
maxval = np.percentile(resized_frame, 96).astype(np.uint8)
min_value = np.percentile(resized_frame, 4).astype(np.uint8)
max_value = np.percentile(resized_frame, 96).astype(np.uint8)
# skip contrast calcs if the image is a single color
if minval < maxval:
if min_value < max_value:
# keep track of the last 50 contrast values
self.contrast_values[self.contrast_values_index] = [minval, maxval]
self.contrast_values[self.contrast_values_index] = [
min_value,
max_value,
]
self.contrast_values_index += 1
if self.contrast_values_index == len(self.contrast_values):
self.contrast_values_index = 0
@ -122,14 +125,14 @@ class ImprovedMotionDetector(MotionDetector):
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh_dilated = cv2.dilate(thresh, None, iterations=1)
cnts = cv2.findContours(
contours = cv2.findContours(
thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
cnts = imutils.grab_contours(cnts)
contours = imutils.grab_contours(contours)
# loop over the contours
total_contour_area = 0
for c in cnts:
for c in contours:
# if the contour is big enough, count it as motion
contour_area = cv2.contourArea(c)
total_contour_area += contour_area

View File

@ -125,6 +125,7 @@ def run_detector(
start.value = datetime.datetime.now().timestamp()
detections = object_detector.detect_raw(input_frame)
duration = datetime.datetime.now().timestamp() - start.value
frame_manager.close(connection_id)
outputs[connection_id]["np"][:] = detections[:]
out_events[connection_id].set()
start.value = 0.0

View File

@ -1138,12 +1138,14 @@ class TrackedObjectProcessor(threading.Thread):
)
)
or (
not review_config.detections.labels
or obj.obj_data["label"] in review_config.detections.labels
)
and (
not review_config.detections.required_zones
or set(obj.entered_zones) & set(review_config.alerts.required_zones)
(
not review_config.detections.labels
or obj.obj_data["label"] in review_config.detections.labels
)
and (
not review_config.detections.required_zones
or set(obj.entered_zones) & set(review_config.detections.required_zones)
)
)
):
logger.debug(

View File

@ -268,9 +268,9 @@ class PtzAutoTracker:
self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
return
movestatus_supported = self.onvif.get_service_capabilities(camera)
move_status_supported = self.onvif.get_service_capabilities(camera)
if movestatus_supported is None or movestatus_supported.lower() != "true":
if move_status_supported is None or move_status_supported.lower() != "true":
logger.warning(
f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
)
@ -807,8 +807,8 @@ class PtzAutoTracker:
invalid_delta = np.any(delta > delta_thresh)
# Check variance
stdevs = np.std(velocities, axis=0)
high_variances = np.any(stdevs > var_thresh)
stdev_list = np.std(velocities, axis=0)
high_variances = np.any(stdev_list > var_thresh)
# Check direction difference
velocities = np.round(velocities)
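A toy run of the renamed variance gate (threshold invented):

import numpy as np

velocities = np.array([[0.20, 0.10], [0.25, 0.12], [1.50, 0.11]])
stdev_list = np.std(velocities, axis=0)    # per-axis spread across samples
high_variances = np.any(stdev_list > 0.5)  # True: the x-axis spread exceeds 0.5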

View File

@ -335,6 +335,10 @@ class OnvifController:
)
self._stop(camera_name)
if "pt" not in self.cams[camera_name]["features"]:
logger.error(f"{camera_name} does not support ONVIF pan/tilt movement.")
return
self.cams[camera_name]["active"] = True
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["move_request"]
@ -476,6 +480,10 @@ class OnvifController:
)
self._stop(camera_name)
if "zoom" not in self.cams[camera_name]["features"]:
logger.error(f"{camera_name} does not support ONVIF zooming.")
return
self.cams[camera_name]["active"] = True
onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
move_request = self.cams[camera_name]["move_request"]

View File

@ -90,9 +90,9 @@ class RecordingMaintainer(threading.Thread):
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
file_list = process.open_files()
if file_list:
for nt in file_list:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split("/")[-1])
except psutil.Error:

View File

@ -250,7 +250,7 @@ def stats_snapshot(
ffmpeg_pid = (
camera_stats["ffmpeg_pid"].value if camera_stats["ffmpeg_pid"] else None
)
cpid = (
capture_pid = (
camera_stats["capture_process"].pid
if camera_stats["capture_process"]
else None
@ -262,7 +262,7 @@ def stats_snapshot(
"detection_fps": round(camera_stats["detection_fps"].value, 2),
"detection_enabled": config.cameras[name].detect.enabled,
"pid": pid,
"capture_pid": cpid,
"capture_pid": capture_pid,
"ffmpeg_pid": ffmpeg_pid,
"audio_rms": round(camera_stats["audio_rms"].value, 4),
"audio_dBFS": round(camera_stats["audio_dBFS"].value, 4),

View File

@ -1,4 +1,4 @@
"""Consts for testing."""
"""Constants for testing."""
TEST_DB = "test.db"
TEST_DB_CLEANUPS = ["test.db", "test.db-shm", "test.db-wal"]

View File

@ -78,7 +78,7 @@ class TestFfmpegPresets(unittest.TestCase):
" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
)
assert (
"fps=10,scale_cuda=w=2560:h=1920:format=nv12,hwdownload,format=nv12,format=yuv420p"
"fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12,eq=gamma=1.05"
in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
)

View File

@ -44,9 +44,10 @@ class TimelineProcessor(threading.Thread):
continue
if input_type == EventTypeEnum.tracked_object:
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
if prev_event_data is not None and event_data is not None:
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
elif input_type == EventTypeEnum.api:
self.handle_api_entry(camera, event_type, event_data)

View File

@ -511,12 +511,12 @@ def reduce_detections(
# due to min score requirement of NMSBoxes
confidences = [0.6 if clipped(o, frame_shape) else o[1] for o in group]
idxs = cv2.dnn.NMSBoxes(
indices = cv2.dnn.NMSBoxes(
boxes, confidences, 0.5, LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT)
)
# add objects
for index in idxs:
for index in indices:
index = index if isinstance(index, np.int32) else index[0]
obj = group[index]
selected_objects.append(obj)
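For reference, `cv2.dnn.NMSBoxes` takes `[x, y, w, h]` boxes with per-box scores and returns the indices that survive suppression; pinning clipped objects to 0.6 above keeps them over the 0.5 score floor. A toy call (opencv-python assumed):

import cv2

boxes = [[10, 10, 50, 80], [12, 12, 50, 80], [200, 40, 30, 30]]
confidences = [0.9, 0.6, 0.7]
indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
# the two overlapping boxes collapse to the higher-scoring one, e.g. [0, 2]

Older OpenCV builds return nested indices, which is why the loop above unwraps `index[0]`.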

View File

@ -122,7 +122,7 @@ def get_cpu_stats() -> dict[str, dict]:
stats = f.readline().split()
utime = int(stats[13])
stime = int(stats[14])
starttime = int(stats[21])
start_time = int(stats[21])
with open("/proc/uptime") as f:
system_uptime_sec = int(float(f.read().split()[0]))
@ -131,9 +131,9 @@ def get_cpu_stats() -> dict[str, dict]:
process_utime_sec = utime // clk_tck
process_stime_sec = stime // clk_tck
process_starttime_sec = starttime // clk_tck
process_start_time_sec = start_time // clk_tck
process_elapsed_sec = system_uptime_sec - process_starttime_sec
process_elapsed_sec = system_uptime_sec - process_start_time_sec
process_usage_sec = process_utime_sec + process_stime_sec
cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
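Worked numbers for the /proc-based average above (all values invented):

clk_tck = 100                                  # typical os.sysconf("SC_CLK_TCK")
utime, stime, start_time = 45000, 15000, 2000  # ticks from /proc/<pid>/stat
system_uptime_sec = 3600

process_usage_sec = utime // clk_tck + stime // clk_tck           # 450 + 150 = 600
process_start_time_sec = start_time // clk_tck                    # 20
process_elapsed_sec = system_uptime_sec - process_start_time_sec  # 3580
cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec  # 16 (%)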

View File

@ -28,8 +28,7 @@ from frigate.video import ( # noqa: E402
start_or_restart_ffmpeg,
)
logging.basicConfig()
logging.root.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

web/package-lock.json (generated, 379 changes)
View File

@ -34,6 +34,7 @@
"axios": "^1.7.3",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "^1.0.0",
"copy-to-clipboard": "^3.3.3",
"date-fns": "^3.6.0",
"embla-carousel-react": "^8.2.0",
@ -3722,6 +3723,384 @@
"node": ">=6"
}
},
"node_modules/cmdk": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.0.0.tgz",
"integrity": "sha512-gDzVf0a09TvoJ5jnuPvygTB77+XdOSwEmJ88L6XPFPlv7T3RxbP9jgenfylrAMD0+Le1aO0nVjQUzl2g+vjz5Q==",
"license": "MIT",
"dependencies": {
"@radix-ui/react-dialog": "1.0.5",
"@radix-ui/react-primitive": "1.0.3"
},
"peerDependencies": {
"react": "^18.0.0",
"react-dom": "^18.0.0"
}
},
"node_modules/cmdk/node_modules/@radix-ui/primitive": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz",
"integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10"
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-compose-refs": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz",
"integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-context": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz",
"integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-dialog": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.5.tgz",
"integrity": "sha512-GjWJX/AUpB703eEBanuBnIWdIXg6NvJFCXcNlSZk4xdszCdhrJgBoUd1cGk67vFO+WdA2pfI/plOpqz/5GUP6Q==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/primitive": "1.0.1",
"@radix-ui/react-compose-refs": "1.0.1",
"@radix-ui/react-context": "1.0.1",
"@radix-ui/react-dismissable-layer": "1.0.5",
"@radix-ui/react-focus-guards": "1.0.1",
"@radix-ui/react-focus-scope": "1.0.4",
"@radix-ui/react-id": "1.0.1",
"@radix-ui/react-portal": "1.0.4",
"@radix-ui/react-presence": "1.0.1",
"@radix-ui/react-primitive": "1.0.3",
"@radix-ui/react-slot": "1.0.2",
"@radix-ui/react-use-controllable-state": "1.0.1",
"aria-hidden": "^1.1.1",
"react-remove-scroll": "2.5.5"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0",
"react-dom": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-dismissable-layer": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.5.tgz",
"integrity": "sha512-aJeDjQhywg9LBu2t/At58hCvr7pEm0o2Ke1x33B+MhjNmmZ17sy4KImo0KPLgsnc/zN7GPdce8Cnn0SWvwZO7g==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/primitive": "1.0.1",
"@radix-ui/react-compose-refs": "1.0.1",
"@radix-ui/react-primitive": "1.0.3",
"@radix-ui/react-use-callback-ref": "1.0.1",
"@radix-ui/react-use-escape-keydown": "1.0.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0",
"react-dom": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-focus-guards": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.1.tgz",
"integrity": "sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-focus-scope": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.4.tgz",
"integrity": "sha512-sL04Mgvf+FmyvZeYfNu1EPAaaxD+aw7cYeIB9L9Fvq8+urhltTRaEo5ysKOpHuKPclsZcSUMKlN05x4u+CINpA==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-compose-refs": "1.0.1",
"@radix-ui/react-primitive": "1.0.3",
"@radix-ui/react-use-callback-ref": "1.0.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0",
"react-dom": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-id": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz",
"integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-use-layout-effect": "1.0.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-portal": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.4.tgz",
"integrity": "sha512-Qki+C/EuGUVCQTOTD5vzJzJuMUlewbzuKyUy+/iHM2uwGiru9gZeBJtHAPKAEkB5KWGi9mP/CHKcY0wt1aW45Q==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-primitive": "1.0.3"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0",
"react-dom": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-presence": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz",
"integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-compose-refs": "1.0.1",
"@radix-ui/react-use-layout-effect": "1.0.1"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0",
"react-dom": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-primitive": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz",
"integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-slot": "1.0.2"
},
"peerDependencies": {
"@types/react": "*",
"@types/react-dom": "*",
"react": "^16.8 || ^17.0 || ^18.0",
"react-dom": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"@types/react-dom": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-slot": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz",
"integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-compose-refs": "1.0.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz",
"integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-use-controllable-state": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz",
"integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-use-callback-ref": "1.0.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-use-escape-keydown": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz",
"integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/react-use-callback-ref": "1.0.1"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/@radix-ui/react-use-layout-effect": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz",
"integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/cmdk/node_modules/react-remove-scroll": {
"version": "2.5.5",
"resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz",
"integrity": "sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==",
"license": "MIT",
"dependencies": {
"react-remove-scroll-bar": "^2.3.3",
"react-style-singleton": "^2.2.1",
"tslib": "^2.1.0",
"use-callback-ref": "^1.3.0",
"use-sidecar": "^1.1.2"
},
"engines": {
"node": ">=10"
},
"peerDependencies": {
"@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0",
"react": "^16.8.0 || ^17.0.0 || ^18.0.0"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",

View File

@ -40,6 +40,7 @@
"axios": "^1.7.3",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "^1.0.0",
"copy-to-clipboard": "^3.3.3",
"date-fns": "^3.6.0",
"embla-carousel-react": "^8.2.0",

View File

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>

Before: 1.5 KiB SVG (file deleted)

View File

@ -43,7 +43,6 @@ type SearchFilterGroupProps = {
className: string;
filters?: SearchFilters[];
filter?: SearchFilter;
searchTerm: string;
filterList?: FilterList;
onUpdateFilter: (filter: SearchFilter) => void;
};
@ -51,7 +50,6 @@ export default function SearchFilterGroup({
className,
filters = DEFAULT_REVIEW_FILTERS,
filter,
searchTerm,
filterList,
onUpdateFilter,
}: SearchFilterGroupProps) {
@ -109,11 +107,8 @@ export default function SearchFilterGroup({
return;
}
const cameraConfig = config.cameras[camera];
cameraConfig.review.alerts.required_zones.forEach((zone) => {
zones.add(zone);
});
cameraConfig.review.detections.required_zones.forEach((zone) => {
zones.add(zone);
Object.keys(cameraConfig.zones).forEach((name) => {
zones.add(name);
});
});
@ -216,7 +211,7 @@ export default function SearchFilterGroup({
)}
{config?.semantic_search?.enabled &&
filters.includes("source") &&
!searchTerm.includes("similarity:") && (
!filter?.search_type?.includes("similarity") && (
<SearchTypeButton
selectedSearchSources={
filter?.search_type ?? ["thumbnail", "description"]
@ -917,7 +912,7 @@ export function SearchTypeContent({
<div className="my-2.5 flex flex-col gap-2.5">
<FilterSwitch
label="Thumbnail Image"
isChecked={selectedSearchSources?.includes("thumbnail") ?? false}
isChecked={currentSearchSources?.includes("thumbnail") ?? false}
onCheckedChange={(isChecked) => {
const updatedSources = currentSearchSources
? [...currentSearchSources]

View File

@ -32,6 +32,16 @@ export function ThresholdBarGraph({
[data],
);
const yMax = useMemo(() => {
if (unit != "%") {
return undefined;
}
// @ts-expect-error y is valid
const yValues: number[] = data[0].data.map((point) => point?.y);
return Math.max(threshold.warning, ...yValues);
}, [data, threshold, unit]);
const { theme, systemTheme } = useTheme();
const formatTime = useCallback(
@ -130,9 +140,10 @@ export function ThresholdBarGraph({
formatter: (val: number) => Math.ceil(val).toString(),
},
min: 0,
max: yMax,
},
} as ApexCharts.ApexOptions;
}, [graphId, threshold, unit, systemTheme, theme, formatTime]);
}, [graphId, threshold, unit, yMax, systemTheme, theme, formatTime]);
useEffect(() => {
ApexCharts.exec(graphId, "updateOptions", options, true, true);

View File

@ -0,0 +1,46 @@
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
} from "@/components/ui/alert-dialog";
type DeleteSearchDialogProps = {
isOpen: boolean;
onClose: () => void;
onConfirm: () => void;
searchName: string;
};
export function DeleteSearchDialog({
isOpen,
onClose,
onConfirm,
searchName,
}: DeleteSearchDialogProps) {
return (
<AlertDialog open={isOpen} onOpenChange={onClose}>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>Are you sure?</AlertDialogTitle>
<AlertDialogDescription>
This will permanently delete the saved search "{searchName}".
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel onClick={onClose}>Cancel</AlertDialogCancel>
<AlertDialogAction
onClick={onConfirm}
className="bg-destructive text-white"
>
Delete
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
);
}

View File

@ -0,0 +1,701 @@
import { useState, useRef, useEffect, useCallback } from "react";
import {
LuX,
LuFilter,
LuImage,
LuChevronDown,
LuChevronUp,
LuTrash2,
LuStar,
} from "react-icons/lu";
import {
FilterType,
SavedSearchQuery,
SearchFilter,
SearchSource,
} from "@/types/search";
import useSuggestions from "@/hooks/use-suggestions";
import {
Command,
CommandInput,
CommandList,
CommandGroup,
CommandItem,
} from "@/components/ui/command";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { cn } from "@/lib/utils";
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import { usePersistence } from "@/hooks/use-persistence";
import { SaveSearchDialog } from "./SaveSearchDialog";
import { DeleteSearchDialog } from "./DeleteSearchDialog";
import {
convertLocalDateToTimestamp,
getIntlDateFormat,
} from "@/utils/dateUtil";
import { toast } from "sonner";
type InputWithTagsProps = {
filters: SearchFilter;
setFilters: (filter: SearchFilter) => void;
search: string;
setSearch: (search: string) => void;
allSuggestions: {
[K in keyof SearchFilter]: string[];
};
};
export default function InputWithTags({
filters,
setFilters,
search,
setSearch,
allSuggestions,
}: InputWithTagsProps) {
const [inputValue, setInputValue] = useState(search || "");
const [currentFilterType, setCurrentFilterType] = useState<FilterType | null>(
null,
);
const [inputFocused, setInputFocused] = useState(false);
const [isSimilaritySearch, setIsSimilaritySearch] = useState(false);
const inputRef = useRef<HTMLInputElement>(null);
const commandRef = useRef<HTMLDivElement>(null);
// TODO: search history from browser storage
const [searchHistory, setSearchHistory, searchHistoryLoaded] = usePersistence<
SavedSearchQuery[]
>("frigate-search-history");
const [isSaveDialogOpen, setIsSaveDialogOpen] = useState(false);
const [isDeleteDialogOpen, setIsDeleteDialogOpen] = useState(false);
const [searchToDelete, setSearchToDelete] = useState<string | null>(null);
const handleSetSearchHistory = useCallback(() => {
setIsSaveDialogOpen(true);
}, []);
const handleSaveSearch = useCallback(
(name: string) => {
if (searchHistoryLoaded) {
setSearchHistory([
...(searchHistory ?? []),
{
name: name,
search: search,
filter: filters,
},
]);
}
},
[search, filters, searchHistory, setSearchHistory, searchHistoryLoaded],
);
const handleLoadSavedSearch = useCallback(
(name: string) => {
if (searchHistoryLoaded) {
const savedSearchEntry = searchHistory?.find(
(entry) => entry.name === name,
);
if (savedSearchEntry) {
setFilters(savedSearchEntry.filter!);
setSearch(savedSearchEntry.search);
}
}
},
[searchHistory, searchHistoryLoaded, setFilters, setSearch],
);
const handleDeleteSearch = useCallback((name: string) => {
setSearchToDelete(name);
setIsDeleteDialogOpen(true);
}, []);
const confirmDeleteSearch = useCallback(() => {
if (searchToDelete && searchHistory) {
setSearchHistory(
searchHistory.filter((item) => item.name !== searchToDelete) ?? [],
);
setSearchToDelete(null);
setIsDeleteDialogOpen(false);
}
}, [searchToDelete, searchHistory, setSearchHistory]);
// suggestions
const { suggestions, updateSuggestions } = useSuggestions(
filters,
allSuggestions,
searchHistory,
);
const resetSuggestions = useCallback(
(value: string) => {
setCurrentFilterType(null);
updateSuggestions(value, null);
},
[updateSuggestions],
);
const filterSuggestions = useCallback(
(current_suggestions: string[]) => {
if (!inputValue || currentFilterType) return suggestions;
const words = inputValue.split(/[\s,]+/);
const lastNonEmptyWordIndex = words
.map((word) => word.trim())
.lastIndexOf(words.filter((word) => word.trim() !== "").pop() || "");
const currentWord = words[lastNonEmptyWordIndex];
return current_suggestions.filter((suggestion) =>
suggestion.toLowerCase().includes(currentWord.toLowerCase()),
);
},
[inputValue, suggestions, currentFilterType],
);
const removeFilter = useCallback(
(filterType: FilterType, filterValue: string | number) => {
const newFilters = { ...filters };
if (Array.isArray(newFilters[filterType])) {
(newFilters[filterType] as string[]) = (
newFilters[filterType] as string[]
).filter((v) => v !== filterValue);
if ((newFilters[filterType] as string[]).length === 0) {
delete newFilters[filterType];
}
} else if (filterType === "before" || filterType === "after") {
if (newFilters[filterType] === filterValue) {
delete newFilters[filterType];
}
} else {
delete newFilters[filterType];
}
setFilters(newFilters as SearchFilter);
},
[filters, setFilters],
);
const createFilter = useCallback(
(type: FilterType, value: string) => {
if (allSuggestions[type as FilterType]?.includes(value)) {
const newFilters = { ...filters };
let timestamp = 0;
switch (type) {
case "before":
case "after":
timestamp = convertLocalDateToTimestamp(value);
if (timestamp > 0) {
// Check for conflicts with existing before/after filters
if (
type === "before" &&
filters.after &&
timestamp <= filters.after * 1000
) {
toast.error(
"The 'before' date must be later than the 'after' date.",
{
position: "top-center",
},
);
return;
}
if (
type === "after" &&
filters.before &&
timestamp >= filters.before * 1000
) {
toast.error(
"The 'after' date must be earlier than the 'before' date.",
{
position: "top-center",
},
);
return;
}
if (type === "before") {
timestamp -= 1;
}
newFilters[type] = timestamp / 1000;
}
break;
case "search_type":
if (!newFilters.search_type) newFilters.search_type = [];
if (
!(newFilters.search_type as SearchSource[]).includes(
value as SearchSource,
)
) {
(newFilters.search_type as SearchSource[]).push(
value as SearchSource,
);
}
break;
case "event_id":
newFilters.event_id = value;
break;
default:
// Handle array types (cameras, labels, subLabels, zones)
if (!newFilters[type]) newFilters[type] = [];
if (Array.isArray(newFilters[type])) {
if (!(newFilters[type] as string[]).includes(value)) {
(newFilters[type] as string[]).push(value);
}
}
break;
}
setFilters(newFilters);
setInputValue((prev) => prev.replace(`${type}:${value}`, "").trim());
setCurrentFilterType(null);
}
},
[filters, setFilters, allSuggestions],
);
// handlers
const handleFilterCreation = useCallback(
(filterType: FilterType, filterValue: string) => {
const trimmedValue = filterValue.trim();
if (
allSuggestions[filterType]?.includes(trimmedValue) ||
((filterType === "before" || filterType === "after") &&
trimmedValue.match(/^\d{8}$/))
) {
createFilter(filterType, trimmedValue);
setInputValue((prev) => {
const regex = new RegExp(
`${filterType}:${filterValue.trim()}[,\\s]*`,
);
const newValue = prev.replace(regex, "").trim();
return newValue.endsWith(",")
? newValue.slice(0, -1).trim()
: newValue;
});
setCurrentFilterType(null);
}
},
[allSuggestions, createFilter],
);
const handleInputChange = useCallback(
(value: string) => {
setInputValue(value);
const words = value.split(/[\s,]+/);
const lastNonEmptyWordIndex = words
.map((word) => word.trim())
.lastIndexOf(words.filter((word) => word.trim() !== "").pop() || "");
const currentWord = words[lastNonEmptyWordIndex];
const isLastCharSpaceOrComma = value.endsWith(" ") || value.endsWith(",");
// Check if the current word is a filter type
const filterTypeMatch = currentWord.match(/^(\w+):(.*)$/);
if (filterTypeMatch) {
const [_, filterType, filterValue] = filterTypeMatch as [
string,
FilterType,
string,
];
// Check if filter type is valid
if (
filterType in allSuggestions ||
filterType === "before" ||
filterType === "after"
) {
setCurrentFilterType(filterType);
if (filterType === "before" || filterType === "after") {
// For before and after, we don't need to update suggestions
if (filterValue.match(/^\d{8}$/)) {
handleFilterCreation(filterType, filterValue);
}
} else {
updateSuggestions(filterValue, filterType);
// Check if the last character is a space or comma
if (isLastCharSpaceOrComma) {
handleFilterCreation(filterType, filterValue);
}
}
} else {
resetSuggestions(value);
}
} else {
resetSuggestions(value);
}
},
[updateSuggestions, resetSuggestions, allSuggestions, handleFilterCreation],
);
const handleInputFocus = useCallback(() => {
setInputFocused(true);
}, []);
const handleClearInput = useCallback(() => {
setInputFocused(false);
setInputValue("");
resetSuggestions("");
setSearch("");
inputRef?.current?.blur();
setFilters({});
setCurrentFilterType(null);
setIsSimilaritySearch(false);
}, [setFilters, resetSuggestions, setSearch]);
const handleInputBlur = useCallback((e: React.FocusEvent) => {
if (
commandRef.current &&
!commandRef.current.contains(e.relatedTarget as Node)
) {
setInputFocused(false);
}
}, []);
const handleSuggestionClick = useCallback(
(suggestion: string) => {
if (currentFilterType) {
// Apply the selected suggestion to the current filter type
createFilter(currentFilterType, suggestion);
setInputValue((prev) => {
const regex = new RegExp(`${currentFilterType}:[^\\s,]*`, "g");
return prev.replace(regex, "").trim();
});
} else if (suggestion in allSuggestions) {
// Set the suggestion as a new filter type
setCurrentFilterType(suggestion as FilterType);
setInputValue((prev) => {
// Remove any partial match of the filter type, including incomplete matches
const words = prev.split(/\s+/);
const lastWord = words[words.length - 1];
if (lastWord && suggestion.startsWith(lastWord.toLowerCase())) {
words[words.length - 1] = suggestion + ":";
} else {
words.push(suggestion + ":");
}
return words.join(" ").trim();
});
} else {
// Add the suggestion as a standalone word
setInputValue((prev) => `${prev}${suggestion} `);
}
inputRef.current?.focus();
},
[createFilter, currentFilterType, allSuggestions],
);
const handleSearch = useCallback(
(value: string) => {
setSearch(value);
setInputFocused(false);
inputRef?.current?.blur();
},
[setSearch],
);
const handleInputKeyDown = useCallback(
(e: React.KeyboardEvent<HTMLInputElement>) => {
if (
e.key === "Enter" &&
inputValue.trim() !== "" &&
filterSuggestions(suggestions).length == 0
) {
e.preventDefault();
handleSearch(inputValue);
}
},
[inputValue, handleSearch, filterSuggestions, suggestions],
);
// effects
useEffect(() => {
updateSuggestions(inputValue, currentFilterType);
}, [currentFilterType, inputValue, updateSuggestions]);
useEffect(() => {
if (filters?.search_type && filters?.search_type.includes("similarity")) {
setIsSimilaritySearch(true);
setInputValue("");
} else {
setIsSimilaritySearch(false);
setInputValue(search || "");
}
}, [filters, search]);
return (
<>
<Command
shouldFilter={false}
ref={commandRef}
className="rounded-md border"
>
<div className="relative">
<CommandInput
ref={inputRef}
value={inputValue}
onValueChange={handleInputChange}
onFocus={handleInputFocus}
onBlur={handleInputBlur}
onKeyDown={handleInputKeyDown}
className="text-md h-10 pr-24"
placeholder="Search..."
/>
<div className="absolute right-3 top-0 flex h-full flex-row items-center justify-center gap-5">
{(search || Object.keys(filters).length > 0) && (
<Tooltip>
<TooltipTrigger>
<LuX
className="size-4 cursor-pointer text-secondary-foreground"
onClick={handleClearInput}
/>
</TooltipTrigger>
<TooltipPortal>
<TooltipContent>Clear search</TooltipContent>
</TooltipPortal>
</Tooltip>
)}
{(search || Object.keys(filters).length > 0) && (
<Tooltip>
<TooltipTrigger>
<LuStar
className="size-4 cursor-pointer text-secondary-foreground"
onClick={handleSetSearchHistory}
/>
</TooltipTrigger>
<TooltipPortal>
<TooltipContent>Save search</TooltipContent>
</TooltipPortal>
</Tooltip>
)}
{isSimilaritySearch && (
<Tooltip>
<TooltipTrigger className="cursor-default">
<LuImage
aria-label="Similarity search active"
className="size-4 text-selected"
/>
</TooltipTrigger>
<TooltipPortal>
<TooltipContent>Similarity search active</TooltipContent>
</TooltipPortal>
</Tooltip>
)}
<Popover>
<PopoverTrigger asChild>
<button
className="focus:outline-none"
aria-label="Filter information"
>
<LuFilter
aria-label="Filters active"
className={cn(
"size-4",
Object.keys(filters).length > 0
? "text-selected"
: "text-secondary-foreground",
)}
/>
</button>
</PopoverTrigger>
<PopoverContent className="w-80">
<div className="space-y-2">
<h3 className="font-medium">How to use text filters</h3>
<p className="text-sm text-muted-foreground">
Filters help you narrow down your search results. Here's how
to use them:
</p>
<ul className="list-disc pl-5 text-sm text-primary-variant">
<li>
Type a filter name followed by a colon (e.g., "cameras:").
</li>
<li>
Select a value from the suggestions or type your own.
</li>
<li>
Use multiple filters by adding them one after another.
</li>
<li>
Date filters (before: and after:) use{" "}
{getIntlDateFormat()} format.
</li>
<li>Remove filters by clicking the 'x' next to them.</li>
</ul>
<p className="text-sm text-muted-foreground">
Example:{" "}
<code className="text-primary">
cameras:front_door label:person before:01012024
</code>
</p>
</div>
</PopoverContent>
</Popover>
{inputFocused ? (
<LuChevronUp
onClick={() => setInputFocused(false)}
className="size-4 cursor-pointer text-secondary-foreground"
/>
) : (
<LuChevronDown
onClick={() => setInputFocused(true)}
className="size-4 cursor-pointer text-secondary-foreground"
/>
)}
</div>
</div>
<CommandList
className={cn(
"scrollbar-container border-t duration-200 animate-in fade-in",
inputFocused ? "visible" : "hidden",
)}
>
{(Object.keys(filters).length > 0 || isSimilaritySearch) && (
<CommandGroup heading="Active Filters">
<div className="my-2 flex flex-wrap gap-2 px-2">
{isSimilaritySearch && (
<span className="inline-flex items-center whitespace-nowrap rounded-full bg-blue-100 px-2 py-0.5 text-sm text-blue-800">
Similarity Search
<button
onClick={handleClearInput}
className="ml-1 focus:outline-none"
aria-label="Clear similarity search"
>
<LuX className="h-3 w-3" />
</button>
</span>
)}
{Object.entries(filters).map(([filterType, filterValues]) =>
Array.isArray(filterValues)
? filterValues
.filter(() => filterType !== "query")
.filter(() => !filterValues.includes("similarity"))
.map((value, index) => (
<span
key={`${filterType}-${index}`}
className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
>
{filterType.replaceAll("_", " ")}:{" "}
{value.replaceAll("_", " ")}
<button
onClick={() =>
removeFilter(filterType as FilterType, value)
}
className="ml-1 focus:outline-none"
aria-label={`Remove ${filterType}:${value.replaceAll("_", " ")} filter`}
>
<LuX className="h-3 w-3" />
</button>
</span>
))
: filterType !== "event_id" && (
<span
key={filterType}
className="inline-flex items-center whitespace-nowrap rounded-full bg-green-100 px-2 py-0.5 text-sm capitalize text-green-800"
>
{filterType}:
{filterType === "before" || filterType === "after"
? new Date(
(filterType === "before"
? (filterValues as number) + 1
: (filterValues as number)) * 1000,
).toLocaleDateString(
window.navigator?.language || "en-US",
)
: filterValues}
<button
onClick={() =>
removeFilter(
filterType as FilterType,
filterValues as string | number,
)
}
className="ml-1 focus:outline-none"
aria-label={`Remove ${filterType}:${filterValues} filter`}
>
<LuX className="h-3 w-3" />
</button>
</span>
),
)}
</div>
</CommandGroup>
)}
{!currentFilterType &&
!inputValue &&
searchHistoryLoaded &&
(searchHistory?.length ?? 0) > 0 && (
<CommandGroup heading="Saved Searches">
{searchHistory?.map((suggestion, index) => (
<CommandItem
key={index}
className="flex cursor-pointer items-center justify-between"
onSelect={() => handleLoadSavedSearch(suggestion.name)}
>
<span>{suggestion.name}</span>
<Tooltip>
<TooltipTrigger asChild>
<button
onClick={(e) => {
e.stopPropagation();
handleDeleteSearch(suggestion.name);
}}
className="focus:outline-none"
>
<LuTrash2 className="h-4 w-4 text-secondary-foreground" />
</button>
</TooltipTrigger>
<TooltipPortal>
<TooltipContent>Delete saved search</TooltipContent>
</TooltipPortal>
</Tooltip>
</CommandItem>
))}
</CommandGroup>
)}
<CommandGroup
heading={currentFilterType ? "Filter Values" : "Filters"}
>
{filterSuggestions(suggestions)
.filter(
(item) =>
!searchHistory?.some((history) => history.name === item),
)
.map((suggestion, index) => (
<CommandItem
key={index + (searchHistory?.length ?? 0)}
className="cursor-pointer"
onSelect={() => handleSuggestionClick(suggestion)}
>
{suggestion}
</CommandItem>
))}
</CommandGroup>
</CommandList>
</Command>
<SaveSearchDialog
isOpen={isSaveDialogOpen}
onClose={() => setIsSaveDialogOpen(false)}
onSave={handleSaveSearch}
/>
<DeleteSearchDialog
isOpen={isDeleteDialogOpen}
onClose={() => setIsDeleteDialogOpen(false)}
onConfirm={confirmDeleteSearch}
searchName={searchToDelete || ""}
/>
</>
);
}
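For reference, the popover above documents the filter grammar. A minimal sketch of how that grammar could be tokenized into a SearchFilter; parseSearchInput and its return shape are illustrative assumptions, not the component's actual parser:

import { SearchFilter } from "@/types/search";
import { convertLocalDateToTimestamp } from "@/utils/dateUtil";

// Sketch: "cameras:front_door label:person before:01012024" -> filters + free text.
// Tokens without a colon are treated as free-text query words.
function parseSearchInput(input: string): {
  query: string;
  filters: Partial<SearchFilter>;
} {
  const filters: Record<string, unknown> = {};
  const queryParts: string[] = [];
  for (const token of input.trim().split(/\s+/)) {
    const idx = token.indexOf(":");
    if (idx <= 0) {
      queryParts.push(token);
      continue;
    }
    const key = token.slice(0, idx);
    const value = token.slice(idx + 1);
    if (key === "before" || key === "after") {
      // date values use the locale-dependent layout from getIntlDateFormat()
      filters[key] = convertLocalDateToTimestamp(value);
    } else {
      // list filters (cameras, labels, zones, ...) accumulate values
      filters[key] = [...((filters[key] as string[]) ?? []), value];
    }
  }
  return {
    query: queryParts.join(" "),
    filters: filters as Partial<SearchFilter>,
  };
}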

View File

@ -0,0 +1,74 @@
import {
Dialog,
DialogContent,
DialogHeader,
DialogTitle,
DialogFooter,
DialogDescription,
} from "@/components/ui/dialog";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { useState } from "react";
import { isMobile } from "react-device-detect";
import { toast } from "sonner";
type SaveSearchDialogProps = {
isOpen: boolean;
onClose: () => void;
onSave: (name: string) => void;
};
export function SaveSearchDialog({
isOpen,
onClose,
onSave,
}: SaveSearchDialogProps) {
const [searchName, setSearchName] = useState("");
const handleSave = () => {
if (searchName.trim()) {
onSave(searchName.trim());
setSearchName("");
toast.success(`Search (${searchName.trim()}) has been saved.`, {
position: "top-center",
});
onClose();
}
};
return (
<Dialog open={isOpen} onOpenChange={onClose}>
<DialogContent
onOpenAutoFocus={(e) => {
if (isMobile) {
e.preventDefault();
}
}}
>
<DialogHeader>
<DialogTitle>Save Search</DialogTitle>
<DialogDescription className="sr-only">
Provide a name for this saved search.
</DialogDescription>
</DialogHeader>
<Input
value={searchName}
className="text-md"
onChange={(e) => setSearchName(e.target.value)}
placeholder="Enter a name for your search"
/>
<DialogFooter>
<Button onClick={onClose}>Cancel</Button>
<Button
onClick={handleSave}
variant="select"
className="mb-2 md:mb-0"
>
Save
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
);
}
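A minimal usage sketch for the dialog, reusing this file's imports; the parent state and the persistence layer are assumptions:

function SaveSearchExample() {
  const [open, setOpen] = useState(false);
  const handleSave = (name: string) => {
    // persist the current query under `name`; localStorage is illustrative only
    localStorage.setItem(`search-history:${name}`, JSON.stringify({ name }));
  };
  return (
    <>
      <Button onClick={() => setOpen(true)}>Save current search</Button>
      <SaveSearchDialog
        isOpen={open}
        onClose={() => setOpen(false)}
        onSave={handleSave}
      />
    </>
  );
}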

View File

@ -121,7 +121,7 @@ export function AnnotationSettingsPane({
}
return (
<div className="space-y-3 rounded-lg border border-secondary-foreground bg-background_alt p-2">
<div className="mb-3 space-y-3 rounded-lg border border-secondary-foreground bg-background_alt p-2">
<Heading as="h4" className="my-2">
Annotation Settings
</Heading>
@ -152,8 +152,8 @@ export function AnnotationSettingsPane({
render={({ field }) => (
<FormItem>
<FormLabel>Annotation Offset</FormLabel>
<div className="flex flex-col gap-8 md:flex-row-reverse">
<div className="my-5 flex flex-row items-center gap-3 rounded-lg bg-destructive/50 p-3 text-sm text-primary-variant md:my-0">
<div className="flex flex-col gap-3 md:flex-row-reverse md:gap-8">
<div className="flex flex-row items-center gap-3 rounded-lg bg-destructive/50 p-3 text-sm text-primary-variant md:my-0 md:my-5">
<PiWarningCircle className="size-24" />
<div>
This data comes from your camera's detect feed but is
@ -161,7 +161,7 @@ export function AnnotationSettingsPane({
unlikely that the two streams are perfectly in sync. As a
result, the bounding box and the footage will not line up
perfectly. However, the <code>annotation_offset</code>{" "}
field in your config can be used to adjust this.
field can be used to adjust this.
<div className="mt-2 flex items-center text-primary">
<Link
to="https://docs.frigate.video/configuration/reference"

View File

@ -357,10 +357,7 @@ export default function ObjectLifecycle({
)}
<div className="relative flex flex-col items-center justify-center">
<Carousel
className={cn("m-0 w-full", fullscreen && isDesktop && "w-[75%]")}
setApi={setMainApi}
>
<Carousel className="m-0 w-full" setApi={setMainApi}>
<CarouselContent>
{eventSequence.map((item, index) => (
<CarouselItem key={index}>

View File

@ -234,7 +234,7 @@ export default function ReviewDetailDialog({
)}
{pane == "details" && selectedEvent && (
<div className="scrollbar-container overflow-x-none mt-0 flex size-full flex-col gap-2 overflow-y-auto overflow-x-hidden">
<div className="mt-0 flex size-full flex-col gap-2">
<ObjectLifecycle event={selectedEvent} setPane={setPane} />
</div>
)}
@ -370,7 +370,9 @@ function EventItem({
<Chip
className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
onClick={() => {
navigate(`/explore?similarity_search_id=${event.id}`);
navigate(
`/explore?search_type=similarity&event_id=${event.id}`,
);
}}
>
<FaImages className="size-4 text-white" />
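The old single similarity_search_id parameter is split into search_type and event_id; the receiving page can read them back like this (a sketch using URLSearchParams rather than the app's router helpers):

const params = new URLSearchParams(window.location.search);
const isSimilarity = params.get("search_type") === "similarity";
const eventId = params.get("event_id"); // the tracked object to match against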

View File

@ -0,0 +1,153 @@
import * as React from "react";
import { type DialogProps } from "@radix-ui/react-dialog";
import { Command as CommandPrimitive } from "cmdk";
import { Search } from "lucide-react";
import { cn } from "@/lib/utils";
import { Dialog, DialogContent } from "@/components/ui/dialog";
const Command = React.forwardRef<
React.ElementRef<typeof CommandPrimitive>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive>
>(({ className, ...props }, ref) => (
<CommandPrimitive
ref={ref}
className={cn(
"flex h-full w-full flex-col overflow-hidden rounded-md bg-popover text-popover-foreground",
className,
)}
{...props}
/>
));
Command.displayName = CommandPrimitive.displayName;
interface CommandDialogProps extends DialogProps {}
const CommandDialog = ({ children, ...props }: CommandDialogProps) => {
return (
<Dialog {...props}>
<DialogContent className="overflow-hidden p-0 shadow-lg">
<Command className="[&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-muted-foreground [&_[cmdk-group]:not([hidden])_~[cmdk-group]]:pt-0 [&_[cmdk-group]]:px-2 [&_[cmdk-input-wrapper]_svg]:h-5 [&_[cmdk-input-wrapper]_svg]:w-5 [&_[cmdk-input]]:h-12 [&_[cmdk-item]]:px-2 [&_[cmdk-item]]:py-3 [&_[cmdk-item]_svg]:h-5 [&_[cmdk-item]_svg]:w-5">
{children}
</Command>
</DialogContent>
</Dialog>
);
};
const CommandInput = React.forwardRef<
React.ElementRef<typeof CommandPrimitive.Input>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Input>
>(({ className, ...props }, ref) => (
<div className="flex items-center px-3" cmdk-input-wrapper="">
<Search className="mr-2 h-4 w-4 shrink-0 opacity-50" />
<CommandPrimitive.Input
ref={ref}
className={cn(
"flex h-11 w-full rounded-md bg-transparent py-3 text-sm outline-none placeholder:text-muted-foreground disabled:cursor-not-allowed disabled:opacity-50",
className,
)}
{...props}
/>
</div>
));
CommandInput.displayName = CommandPrimitive.Input.displayName;
const CommandList = React.forwardRef<
React.ElementRef<typeof CommandPrimitive.List>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive.List>
>(({ className, ...props }, ref) => (
<CommandPrimitive.List
ref={ref}
className={cn("max-h-[300px] overflow-y-auto overflow-x-hidden", className)}
{...props}
/>
));
CommandList.displayName = CommandPrimitive.List.displayName;
const CommandEmpty = React.forwardRef<
React.ElementRef<typeof CommandPrimitive.Empty>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Empty>
>((props, ref) => (
<CommandPrimitive.Empty
ref={ref}
className="py-6 text-center text-sm"
{...props}
/>
));
CommandEmpty.displayName = CommandPrimitive.Empty.displayName;
const CommandGroup = React.forwardRef<
React.ElementRef<typeof CommandPrimitive.Group>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Group>
>(({ className, ...props }, ref) => (
<CommandPrimitive.Group
ref={ref}
className={cn(
"overflow-hidden p-1 text-foreground [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:py-1.5 [&_[cmdk-group-heading]]:text-xs [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group-heading]]:text-muted-foreground",
className,
)}
{...props}
/>
));
CommandGroup.displayName = CommandPrimitive.Group.displayName;
const CommandSeparator = React.forwardRef<
React.ElementRef<typeof CommandPrimitive.Separator>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Separator>
>(({ className, ...props }, ref) => (
<CommandPrimitive.Separator
ref={ref}
className={cn("-mx-1 h-px bg-border", className)}
{...props}
/>
));
CommandSeparator.displayName = CommandPrimitive.Separator.displayName;
const CommandItem = React.forwardRef<
React.ElementRef<typeof CommandPrimitive.Item>,
React.ComponentPropsWithoutRef<typeof CommandPrimitive.Item>
>(({ className, ...props }, ref) => (
<CommandPrimitive.Item
ref={ref}
className={cn(
"relative flex cursor-default select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none data-[disabled=true]:pointer-events-none data-[selected='true']:bg-accent data-[selected=true]:text-accent-foreground data-[disabled=true]:opacity-50",
className,
)}
{...props}
/>
));
CommandItem.displayName = CommandPrimitive.Item.displayName;
const CommandShortcut = ({
className,
...props
}: React.HTMLAttributes<HTMLSpanElement>) => {
return (
<span
className={cn(
"ml-auto text-xs tracking-widest text-muted-foreground",
className,
)}
{...props}
/>
);
};
CommandShortcut.displayName = "CommandShortcut";
export {
Command,
CommandDialog,
CommandInput,
CommandList,
CommandEmpty,
CommandGroup,
CommandItem,
CommandShortcut,
CommandSeparator,
};
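For reference, these primitives compose the same way as in cmdk itself; an illustrative (not shipped) example:

<Command>
  <CommandInput placeholder="Search cameras and labels..." />
  <CommandList>
    <CommandEmpty>No results found.</CommandEmpty>
    <CommandGroup heading="Cameras">
      {/* onSelect receives the item's value string */}
      <CommandItem onSelect={(value) => console.log(value)}>
        front_door
      </CommandItem>
      <CommandItem>back_yard</CommandItem>
    </CommandGroup>
    <CommandSeparator />
    <CommandGroup heading="Labels">
      <CommandItem>person</CommandItem>
    </CommandGroup>
  </CommandList>
</Command>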

View File

@ -125,7 +125,7 @@ export function useSearchEffect(
const remove = callback(param[1]);
if (remove) {
setSearchParams();
setSearchParams(undefined, { replace: true });
}
}, [param, callback, setSearchParams]);
}
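Passing { replace: true } rewrites the current history entry when the consumed parameter is removed, so pressing Back does not resurrect it. Typical call shape; the handler name is hypothetical:

useSearchEffect("event_id", (eventId) => {
  openEventDetail(eventId); // hypothetical consumer of the one-shot param
  // returning true asks the hook to strip the param via replace-navigation
  return true;
});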

View File

@ -0,0 +1,63 @@
import { FilterType, SavedSearchQuery, SearchFilter } from "@/types/search";
import { useCallback, useState } from "react";
// Custom hook for managing suggestions
export type UseSuggestionsType = (
filters: SearchFilter,
allSuggestions: { [K in keyof SearchFilter]: string[] },
searchHistory: SavedSearchQuery[],
) => ReturnType<typeof useSuggestions>;
// Define and export the useSuggestions hook
export default function useSuggestions(
filters: SearchFilter,
allSuggestions: { [K in keyof SearchFilter]: string[] },
searchHistory?: SavedSearchQuery[],
) {
const [suggestions, setSuggestions] = useState<string[]>([]);
const updateSuggestions = useCallback(
(value: string, currentFilterType: FilterType | null) => {
if (currentFilterType && currentFilterType in allSuggestions) {
const filterValue = value.split(":").pop() || "";
const currentFilterValues = filters[currentFilterType] || [];
setSuggestions(
allSuggestions[currentFilterType]?.filter(
(item) =>
item.toLowerCase().startsWith(filterValue.toLowerCase()) &&
!(currentFilterValues as (string | number)[]).includes(item),
) ?? [],
);
} else {
const availableFilters = Object.keys(allSuggestions).filter(
(filter) => {
const filterKey = filter as FilterType;
const filterValues = filters[filterKey];
const suggestionValues = allSuggestions[filterKey];
if (!filterValues) return true;
if (
Array.isArray(filterValues) &&
Array.isArray(suggestionValues)
) {
return filterValues.length < suggestionValues.length;
}
return false;
},
);
setSuggestions([
...(searchHistory?.map((search) => search.name) ?? []),
...availableFilters,
"before",
"after",
]);
}
},
[filters, allSuggestions, searchHistory],
);
return {
suggestions,
updateSuggestions,
};
}
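A wiring sketch based on the hook's signature; the surrounding values are assumptions:

const { suggestions, updateSuggestions } = useSuggestions(
  filters,
  {
    cameras: ["front_door", "back_yard"],
    labels: ["person", "car"],
  },
  searchHistory,
);

// recompute as the user types; with no active "key:" prefix the hook
// suggests filter names, saved-search names, and before/after instead
updateSuggestions(inputValue, currentFilterType);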

View File

@ -31,6 +31,7 @@ function ConfigEditor() {
const editorRef = useRef<monaco.editor.IStandaloneCodeEditor | null>(null);
const modelRef = useRef<monaco.editor.ITextModel | null>(null);
const configRef = useRef<HTMLDivElement | null>(null);
const schemaConfiguredRef = useRef(false);
const onHandleSaveConfig = useCallback(
async (save_option: SaveOptions) => {
@ -79,50 +80,59 @@ function ConfigEditor() {
return;
}
if (modelRef.current != null) {
// we don't need to recreate the editor if it already exists
editorRef.current?.layout();
return;
const modelUri = monaco.Uri.parse(
`a://b/api/config/schema_${Date.now()}.json`,
);
// Configure Monaco YAML schema only once
if (!schemaConfiguredRef.current) {
configureMonacoYaml(monaco, {
enableSchemaRequest: true,
hover: true,
completion: true,
validate: true,
format: true,
schemas: [
{
uri: `${apiHost}api/config/schema.json`,
fileMatch: [String(modelUri)],
},
],
});
schemaConfiguredRef.current = true;
}
const modelUri = monaco.Uri.parse("a://b/api/config/schema.json");
if (monaco.editor.getModels().length > 0) {
modelRef.current = monaco.editor.getModel(modelUri);
} else {
if (!modelRef.current) {
modelRef.current = monaco.editor.createModel(config, "yaml", modelUri);
} else {
modelRef.current.setValue(config);
}
configureMonacoYaml(monaco, {
enableSchemaRequest: true,
hover: true,
completion: true,
validate: true,
format: true,
schemas: [
{
uri: `${apiHost}api/config/schema.json`,
fileMatch: [String(modelUri)],
},
],
});
const container = configRef.current;
if (container != null) {
if (container && !editorRef.current) {
editorRef.current = monaco.editor.create(container, {
language: "yaml",
model: modelRef.current,
scrollBeyondLastLine: false,
theme: (systemTheme || theme) == "dark" ? "vs-dark" : "vs-light",
});
} else if (editorRef.current) {
editorRef.current.setModel(modelRef.current);
}
return () => {
configRef.current = null;
modelRef.current = null;
if (editorRef.current) {
editorRef.current.dispose();
editorRef.current = null;
}
if (modelRef.current) {
modelRef.current.dispose();
modelRef.current = null;
}
schemaConfiguredRef.current = false;
};
});
}, [config, apiHost, systemTheme, theme]);
// monitoring state
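The schemaConfiguredRef guard is the standard run-once pattern for imperative setup that is not idempotent; distilled into a sketch (useRef/useEffect as imported above, schema options elided):

const didInit = useRef(false);

useEffect(() => {
  if (didInit.current) return; // skip re-runs caused by dependency changes
  didInit.current = true;
  configureMonacoYaml(monaco, { enableSchemaRequest: true, schemas: [] });
}, []);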

View File

@ -14,6 +14,10 @@ import {
ReviewSummary,
SegmentedReviewData,
} from "@/types/review";
import {
getBeginningOfDayTimestamp,
getEndOfDayTimestamp,
} from "@/utils/dateUtil";
import EventView from "@/views/events/EventView";
import { RecordingView } from "@/views/recording/RecordingView";
import axios from "axios";
@ -43,10 +47,17 @@ export default function Events() {
.get(`review/${reviewId}`)
.then((resp) => {
if (resp.status == 200 && resp.data) {
const startTime = resp.data.start_time - REVIEW_PADDING;
const date = new Date(startTime * 1000);
setReviewFilter({
after: getBeginningOfDayTimestamp(date),
before: getEndOfDayTimestamp(date),
});
setRecording(
{
camera: resp.data.camera,
startTime: resp.data.start_time - REVIEW_PADDING,
startTime,
severity: resp.data.severity,
},
true,
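With the new helpers the review filter spans the whole local day containing the item; a worked example (both helpers mutate the Date they receive and return epoch seconds):

const date = new Date(1726923054 * 1000); // e.g. 2024-09-21 local time
getBeginningOfDayTimestamp(date); // epoch seconds for 00:00:00.000 that day
getEndOfDayTimestamp(date); // epoch seconds for 23:59:59.999 that day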

View File

@ -1,105 +1,60 @@
import { useApiFilterArgs } from "@/hooks/use-api-filter";
import { useCameraPreviews } from "@/hooks/use-camera-previews";
import { useOverlayState, useSearchEffect } from "@/hooks/use-overlay-state";
import { FrigateConfig } from "@/types/frigateConfig";
import { RecordingStartingPoint } from "@/types/record";
import { SearchFilter, SearchQuery, SearchResult } from "@/types/search";
import { TimeRange } from "@/types/timeline";
import { RecordingView } from "@/views/recording/RecordingView";
import SearchView from "@/views/search/SearchView";
import { useCallback, useEffect, useMemo, useState } from "react";
import useSWR from "swr";
import { TbExclamationCircle } from "react-icons/tb";
import useSWRInfinite from "swr/infinite";
const API_LIMIT = 25;
export default function Explore() {
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
// search field handler
const [searchTimeout, setSearchTimeout] = useState<NodeJS.Timeout>();
const [search, setSearch] = useState("");
const [searchTerm, setSearchTerm] = useState("");
const [recording, setRecording] =
useOverlayState<RecordingStartingPoint>("recording");
// search filter
const similaritySearch = useMemo(() => {
if (!searchTerm.includes("similarity:")) {
return undefined;
}
return searchTerm.split(":")[1];
}, [searchTerm]);
const [searchFilter, setSearchFilter, searchSearchParams] =
useApiFilterArgs<SearchFilter>();
// search api
const searchTerm = useMemo(
() => searchSearchParams?.["query"] || "",
[searchSearchParams],
);
useSearchEffect("similarity_search_id", (similarityId) => {
setSearch(`similarity:${similarityId}`);
// @ts-expect-error we want to clear this
setSearchFilter({ ...searchFilter, similarity_search_id: undefined });
return false;
});
const similaritySearch = useMemo(
() => searchSearchParams["search_type"] == "similarity",
[searchSearchParams],
);
useEffect(() => {
if (searchTimeout) {
clearTimeout(searchTimeout);
if (!searchTerm && !search) {
return;
}
setSearchTimeout(
setTimeout(() => {
setSearchTimeout(undefined);
setSearchTerm(search);
}, 750),
);
// we only want to update the searchTerm when search changes
// switch back to normal search when query is entered
setSearchFilter({
...searchFilter,
search_type:
similaritySearch && search ? undefined : searchFilter?.search_type,
event_id: similaritySearch && search ? undefined : searchFilter?.event_id,
query: search.length > 0 ? search : undefined,
});
// only update when search is updated
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [search]);
const searchQuery: SearchQuery = useMemo(() => {
if (similaritySearch) {
return [
"events/search",
{
query: similaritySearch,
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["subLabels"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
include_thumbnails: 0,
search_type: "similarity",
},
];
// no search parameters
if (searchSearchParams && Object.keys(searchSearchParams).length === 0) {
return null;
}
if (searchTerm) {
return [
"events/search",
{
query: searchTerm,
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["subLabels"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
search_type: searchSearchParams["search_type"],
include_thumbnails: 0,
},
];
}
if (searchSearchParams && Object.keys(searchSearchParams).length !== 0) {
// parameters, but no search term and not similarity
if (
searchSearchParams &&
Object.keys(searchSearchParams).length !== 0 &&
!searchTerm &&
!similaritySearch
) {
return [
"events",
{
@ -118,15 +73,38 @@ export default function Explore() {
];
}
return null;
// parameters and search term
if (!similaritySearch) {
setSearch(searchTerm);
}
return [
"events/search",
{
query: similaritySearch ? undefined : searchTerm,
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["subLabels"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
search_type: searchSearchParams["search_type"],
event_id: searchSearchParams["event_id"],
include_thumbnails: 0,
},
];
}, [searchTerm, searchSearchParams, similaritySearch]);
// paging
// usually slow only on first run while downloading models
const [isSlowLoading, setIsSlowLoading] = useState(false);
const getKey = (
pageIndex: number,
previousPageData: SearchResult[] | null,
): SearchQuery => {
if (isSlowLoading && !similaritySearch) return null;
if (previousPageData && !previousPageData.length) return null; // reached the end
if (!searchQuery) return null;
@ -148,8 +126,14 @@ export default function Explore() {
const { data, size, setSize, isValidating } = useSWRInfinite<SearchResult[]>(
getKey,
{
revalidateFirstPage: false,
revalidateFirstPage: true,
revalidateAll: false,
onLoadingSlow: () => {
if (!similaritySearch) {
setIsSlowLoading(true);
}
},
loadingTimeout: 10000,
},
);
@ -180,108 +164,40 @@ export default function Explore() {
}
}, [isReachingEnd, isLoadingMore, setSize, size, searchResults, searchQuery]);
// previews
const previewTimeRange = useMemo<TimeRange>(() => {
if (!searchResults) {
return { after: 0, before: 0 };
}
return {
after: Math.min(...searchResults.map((res) => res.start_time)),
before: Math.max(
...searchResults.map((res) => res.end_time ?? Date.now() / 1000),
),
};
}, [searchResults]);
const allPreviews = useCameraPreviews(previewTimeRange, {
autoRefresh: false,
fetchPreviews: searchResults != undefined,
});
// selection
const onOpenSearch = useCallback(
(item: SearchResult) => {
setRecording({
camera: item.camera,
startTime: item.start_time,
severity: "alert",
});
},
[setRecording],
);
const selectedReviewData = useMemo(() => {
if (!recording) {
return undefined;
}
if (!config) {
return undefined;
}
if (!searchResults) {
return undefined;
}
const allCameras = searchFilter?.cameras ?? Object.keys(config.cameras);
return {
camera: recording.camera,
start_time: recording.startTime,
allCameras: allCameras,
};
// previews will not update after item is selected
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [recording, searchResults]);
const selectedTimeRange = useMemo(() => {
if (!recording) {
return undefined;
}
const time = new Date(recording.startTime * 1000);
time.setUTCMinutes(0, 0, 0);
const start = time.getTime() / 1000;
time.setHours(time.getHours() + 2);
const end = time.getTime() / 1000;
return {
after: start,
before: end,
};
}, [recording]);
if (recording) {
if (selectedReviewData && selectedTimeRange) {
return (
<RecordingView
startCamera={selectedReviewData.camera}
startTime={selectedReviewData.start_time}
allCameras={selectedReviewData.allCameras}
allPreviews={allPreviews}
timeRange={selectedTimeRange}
updateFilter={setSearchFilter}
return (
<>
{isSlowLoading && !similaritySearch ? (
<div className="absolute inset-0 left-1/2 top-1/2 flex h-96 w-96 -translate-x-1/2 -translate-y-1/2">
<div className="flex flex-col items-center justify-center rounded-lg bg-background/50 p-5">
<p className="my-5 text-lg">Search Unavailable</p>
<TbExclamationCircle className="mb-3 size-10" />
<p className="max-w-96 text-center">
              If this is your first time using Search, be patient while
              Frigate downloads the necessary embedding models. Check the
              Frigate logs for progress.
</p>
</div>
</div>
) : (
<SearchView
search={search}
searchTerm={searchTerm}
searchFilter={searchFilter}
searchResults={searchResults}
isLoading={(isLoadingInitialData || isLoadingMore) ?? true}
setSearch={setSearch}
setSimilaritySearch={(search) => {
setSearchFilter({
...searchFilter,
search_type: ["similarity"],
event_id: search.id,
});
}}
setSearchFilter={setSearchFilter}
onUpdateFilter={setSearchFilter}
loadMore={loadMore}
hasMore={!isReachingEnd}
/>
);
}
} else {
return (
<SearchView
search={search}
searchTerm={searchTerm}
searchFilter={searchFilter}
searchResults={searchResults}
isLoading={(isLoadingInitialData || isLoadingMore) ?? true}
setSearch={setSearch}
setSimilaritySearch={(search) => setSearch(`similarity:${search.id}`)}
onUpdateFilter={setSearchFilter}
onOpenSearch={onOpenSearch}
loadMore={loadMore}
hasMore={!isReachingEnd}
/>
);
}
)}
</>
);
}
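The "Search Unavailable" branch hangs off SWR's slow-loading callback; the pattern in isolation, with getKey and fetcher as placeholders:

const [isSlowLoading, setIsSlowLoading] = useState(false);

const { data } = useSWRInfinite(getKey, fetcher, {
  loadingTimeout: 10000, // ms of silence before onLoadingSlow fires
  onLoadingSlow: () => setIsSlowLoading(true),
});
// while isSlowLoading, render the notice above instead of SearchView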

View File

@ -29,6 +29,7 @@ export type SearchResult = {
};
export type SearchFilter = {
query?: string;
cameras?: string[];
labels?: string[];
subLabels?: string[];
@ -54,4 +55,11 @@ export type SearchQueryParams = {
page?: number;
};
export type SearchQuery = [string, SearchQueryParams] | null;
export type FilterType = Exclude<keyof SearchFilter, "query">;
export type SavedSearchQuery = {
name: string;
search: string;
filter: SearchFilter | undefined;
};
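Example values for the new types (illustrative):

const saved: SavedSearchQuery = {
  name: "front door people",
  search: "person near the front door",
  filter: { cameras: ["front_door"], labels: ["person"] },
};

const key: FilterType = "cameras"; // "query" is excluded from FilterType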

View File

@ -285,6 +285,11 @@ export function endOfHourOrCurrentTime(timestamp: number) {
return Math.min(timestamp, now.getTime() / 1000);
}
export function getBeginningOfDayTimestamp(date: Date) {
date.setHours(0, 0, 0, 0);
return date.getTime() / 1000;
}
export function getEndOfDayTimestamp(date: Date) {
date.setHours(23, 59, 59, 999);
return date.getTime() / 1000;
@ -296,3 +301,75 @@ export function isCurrentHour(timestamp: number) {
return timestamp > now.getTime() / 1000;
}
export const convertLocalDateToTimestamp = (dateString: string): number => {
// Ensure the date string is in the correct format (8 digits)
if (!/^\d{8}$/.test(dateString)) {
return 0;
}
// Determine the local date format
const format = new Intl.DateTimeFormat()
.formatToParts(new Date())
.reduce((acc, part) => {
if (part.type === "day") acc.push("D");
if (part.type === "month") acc.push("M");
if (part.type === "year") acc.push("Y");
return acc;
}, [] as string[])
.join("");
let day: string, month: string, year: string;
// Parse the date string according to the detected format
switch (format) {
case "DMY":
[day, month, year] = [
dateString.slice(0, 2),
dateString.slice(2, 4),
dateString.slice(4),
];
break;
case "MDY":
[month, day, year] = [
dateString.slice(0, 2),
dateString.slice(2, 4),
dateString.slice(4),
];
break;
case "YMD":
[year, month, day] = [
        dateString.slice(0, 4),
        dateString.slice(4, 6),
        dateString.slice(6),
];
break;
default:
return 0;
}
// Create a Date object based on the local timezone
const localDate = new Date(`${year}-${month}-${day}T00:00:00`);
// Check if the date is valid
if (isNaN(localDate.getTime())) {
return 0;
}
  // getTime() yields milliseconds since the UTC epoch
const timestamp = localDate.getTime();
return timestamp;
};
export function getIntlDateFormat() {
return new Intl.DateTimeFormat()
.formatToParts(new Date())
.reduce((acc, part) => {
if (part.type === "day") acc.push("DD");
if (part.type === "month") acc.push("MM");
if (part.type === "year") acc.push("YYYY");
return acc;
}, [] as string[])
.join("");
}
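Worked examples for the two additions; outputs depend on the runtime locale (en-US shown):

getIntlDateFormat(); // "MMDDYYYY" in en-US, "DDMMYYYY" in en-GB
convertLocalDateToTimestamp("01012024"); // ms for 2024-01-01T00:00:00 local time
convertLocalDateToTimestamp("1/1/24"); // 0: input must be exactly 8 digits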

View File

@ -3,7 +3,6 @@ import SearchFilterGroup from "@/components/filter/SearchFilterGroup";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import Chip from "@/components/indicators/Chip";
import SearchDetailDialog from "@/components/overlay/detail/SearchDetailDialog";
import { Input } from "@/components/ui/input";
import { Toaster } from "@/components/ui/sonner";
import {
Tooltip,
@ -12,16 +11,17 @@ import {
} from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
import { FrigateConfig } from "@/types/frigateConfig";
import { SearchFilter, SearchResult } from "@/types/search";
import { SearchFilter, SearchResult, SearchSource } from "@/types/search";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { isMobileOnly } from "react-device-detect";
import { LuImage, LuSearchX, LuText, LuXCircle } from "react-icons/lu";
import { LuImage, LuSearchX, LuText } from "react-icons/lu";
import useSWR from "swr";
import ExploreView from "../explore/ExploreView";
import useKeyboardListener, {
KeyModifiers,
} from "@/hooks/use-keyboard-listener";
import scrollIntoView from "scroll-into-view-if-needed";
import InputWithTags from "@/components/input/InputWithTags";
type SearchViewProps = {
search: string;
@ -31,8 +31,8 @@ type SearchViewProps = {
isLoading: boolean;
setSearch: (search: string) => void;
setSimilaritySearch: (search: SearchResult) => void;
setSearchFilter: (filter: SearchFilter) => void;
onUpdateFilter: (filter: SearchFilter) => void;
onOpenSearch: (item: SearchResult) => void;
loadMore: () => void;
hasMore: boolean;
};
@ -44,6 +44,7 @@ export default function SearchView({
isLoading,
setSearch,
setSimilaritySearch,
setSearchFilter,
onUpdateFilter,
loadMore,
hasMore,
@ -52,6 +53,69 @@ export default function SearchView({
revalidateOnFocus: false,
});
// suggestions values
const allLabels = useMemo<string[]>(() => {
if (!config) {
return [];
}
const labels = new Set<string>();
const cameras = searchFilter?.cameras || Object.keys(config.cameras);
cameras.forEach((camera) => {
if (camera == "birdseye") {
return;
}
const cameraConfig = config.cameras[camera];
cameraConfig.objects.track.forEach((label) => {
labels.add(label);
});
if (cameraConfig.audio.enabled_in_config) {
cameraConfig.audio.listen.forEach((label) => {
labels.add(label);
});
}
});
return [...labels].sort();
}, [config, searchFilter]);
const { data: allSubLabels } = useSWR("sub_labels");
const allZones = useMemo<string[]>(() => {
if (!config) {
return [];
}
const zones = new Set<string>();
const cameras = searchFilter?.cameras || Object.keys(config.cameras);
cameras.forEach((camera) => {
if (camera == "birdseye") {
return;
}
const cameraConfig = config.cameras[camera];
      Object.keys(cameraConfig.zones).forEach((name) => {
        zones.add(name);
      });
});
return [...zones].sort();
}, [config, searchFilter]);
const suggestionsValues = useMemo(
() => ({
cameras: Object.keys(config?.cameras || {}),
labels: Object.values(allLabels || {}),
zones: Object.values(allZones || {}),
sub_labels: allSubLabels,
search_type: ["thumbnail", "description"] as SearchSource[],
}),
[config, allLabels, allZones, allSubLabels],
);
// remove duplicate event ids
const uniqueResults = useMemo(() => {
@ -192,7 +256,7 @@ export default function SearchView({
<div
className={cn(
"flex flex-col items-start space-y-2 pl-2 pr-2 md:mb-2 md:pl-3 lg:h-10 lg:flex-row lg:items-center lg:space-y-0",
"flex flex-col items-start space-y-2 pl-2 pr-2 md:mb-2 md:pl-3 lg:relative lg:h-10 lg:flex-row lg:items-center lg:space-y-0",
config?.semantic_search?.enabled
? "justify-between"
: "justify-center",
@ -200,24 +264,14 @@ export default function SearchView({
)}
>
{config?.semantic_search?.enabled && (
<div
className={cn(
"relative w-full",
hasExistingSearch ? "lg:mr-3 lg:w-1/3" : "lg:ml-[25%] lg:w-1/2",
)}
>
<Input
className="text-md w-full bg-muted pr-10"
placeholder={"Search for a tracked object..."}
value={search}
onChange={(e) => setSearch(e.target.value)}
<div className={cn("z-[41] w-full lg:absolute lg:top-0 lg:w-1/3")}>
<InputWithTags
filters={searchFilter ?? {}}
setFilters={setSearchFilter}
search={search}
setSearch={setSearch}
allSuggestions={suggestionsValues}
/>
{search && (
<LuXCircle
className="absolute right-2 top-1/2 h-5 w-5 -translate-y-1/2 cursor-pointer text-muted-foreground hover:text-primary"
onClick={() => setSearch("")}
/>
)}
</div>
)}
@ -227,7 +281,6 @@ export default function SearchView({
"w-full justify-between md:justify-start lg:justify-end",
)}
filter={searchFilter}
searchTerm={searchTerm}
onUpdateFilter={onUpdateFilter}
/>
)}