diff --git a/Dockerfile b/Dockerfile
index d1dbd0755..63ae4e212 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -30,7 +30,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
 
 FROM wget AS go2rtc
 ARG TARGETARCH
 WORKDIR /rootfs/usr/local/go2rtc/bin
-RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.5.0/go2rtc_linux_${TARGETARCH}" \
+RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.6.0/go2rtc_linux_${TARGETARCH}" \
     && chmod +x go2rtc
@@ -262,15 +262,35 @@ FROM deps AS frigate
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
 
+# Build TensorRT-specific library
+FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps
+
+RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
+    /tensorrt_libyolo.sh
+
 # Frigate w/ TensorRT Support as separate image
 FROM frigate AS frigate-tensorrt
+
+#Disable S6 Global timeout
+ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
+
+ENV TRT_VER=8.5.3
+ENV YOLO_MODELS="yolov7-tiny-416"
+
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
+
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
-    ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
     ldconfig
 
 # Dev Container w/ TRT
 FROM devcontainer AS devcontainer-trt
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl
diff --git a/docker/build_nginx.sh b/docker/build_nginx.sh
index 1e7bfad21..56c9a146d 100755
--- a/docker/build_nginx.sh
+++ b/docker/build_nginx.sh
@@ -2,10 +2,10 @@
 
 set -euxo pipefail
 
-NGINX_VERSION="1.22.1"
-VOD_MODULE_VERSION="1.30"
-SECURE_TOKEN_MODULE_VERSION="1.4"
-RTMP_MODULE_VERSION="1.2.1"
+NGINX_VERSION="1.25.1"
+VOD_MODULE_VERSION="1.31"
+SECURE_TOKEN_MODULE_VERSION="1.5"
+RTMP_MODULE_VERSION="1.2.2"
 
 cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
 sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
diff --git a/docker/install_deps.sh b/docker/install_deps.sh
index 25b6951b5..7d5242d83 100755
--- a/docker/install_deps.sh
+++ b/docker/install_deps.sh
@@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
         libva-drm2 mesa-va-drivers
 fi
 
-apt-get purge gnupg apt-transport-https wget xz-utils -y
+apt-get purge gnupg apt-transport-https xz-utils -y
 apt-get clean autoclean -y
 apt-get autoremove --purge -y
 rm -rf /var/lib/apt/lists/*
diff --git a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
similarity index 94%
rename from docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
rename to docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
index d4248d047..fe16ed9c5 100644
--- a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
+++ b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
@@ -1,3 +1,4 @@
+/usr/local/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
new file mode 100755
index 000000000..5f0e43553
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
@@ -0,0 +1,53 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Generate models for the TensorRT detector
+
+set -o errexit -o nounset -o pipefail
+
+MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
+OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
+
+# Create output folder
+mkdir -p ${OUTPUT_FOLDER}
+
+FIRST_MODEL=true
+MODEL_CONVERT=""
+
+for model in ${YOLO_MODELS//,/ }
+do
+    # Remove old link in case path/version changed
+    rm -f ${MODEL_CACHE_DIR}/${model}.trt
+
+    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
+        if [[ ${FIRST_MODEL} = true ]]; then
+            MODEL_CONVERT="${model}"
+            FIRST_MODEL=false;
+        else
+            MODEL_CONVERT+=",${model}";
+        fi
+    else
+        ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+    fi
+done
+
+if [[ -z ${MODEL_CONVERT} ]]; then
+    echo "No models to convert."
+    exit 0
+fi
+
+echo "Generating the following TRT Models: ${MODEL_CONVERT}"
+
+# Build trt engine
+cd /usr/local/src/tensorrt_demos/yolo
+
+# Download yolo weights
+./download_yolo.sh $MODEL_CONVERT > /dev/null
+
+for model in ${MODEL_CONVERT//,/ }
+do
+    echo "Converting ${model} model"
+    python3 yolo_to_onnx.py -m ${model} > /dev/null
+    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
+    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
+    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+done
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
new file mode 100644
index 000000000..bdd22a185
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
@@ -0,0 +1 @@
+oneshot
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
new file mode 100644
index 000000000..b9de40ad0
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
@@ -0,0 +1 @@
+/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
diff --git a/docker/support/tensorrt_detector/tensorrt_libyolo.sh b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
new file mode 100755
index 000000000..e6fc415e5
--- /dev/null
+++ b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+SCRIPT_DIR="/usr/local/src/tensorrt_demos"
+
+# Clone tensorrt_demos repo
+git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
+
+# Build libyolo
+cd ./tensorrt_demos/plugins && make all
+cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
+
+# Store yolo scripts for later conversion
+cd ../
+mkdir -p ${SCRIPT_DIR}/plugins
+cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so
+cp -a yolo ${SCRIPT_DIR}/
diff --git a/docker/tensorrt_models.sh b/docker/tensorrt_models.sh
deleted file mode 100755
index 957e817d6..000000000
--- a/docker/tensorrt_models.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-CUDA_HOME=/usr/local/cuda
-LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
-OUTPUT_FOLDER=/tensorrt_models
-echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"
-
-# Create output folder
-mkdir -p ${OUTPUT_FOLDER}
-
-# Install packages
-pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3
-
-# Clone tensorrt_demos repo
-git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos
-
-# Build libyolo
-cd /tensorrt_demos/plugins && make all
-cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so
-
-# Download yolo weights
-cd /tensorrt_demos/yolo && ./download_yolo.sh
-
-# Build trt engine
-cd /tensorrt_demos/yolo
-
-for model in ${YOLO_MODELS//,/ }
-do
-    python3 yolo_to_onnx.py -m ${model}
-    python3 onnx_to_tensorrt.py -m ${model}
-    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
-done
diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md
index af2d7872f..4eb7c2821 100644
--- a/docs/docs/configuration/advanced.md
+++ b/docs/docs/configuration/advanced.md
@@ -120,7 +120,7 @@ NOTE: The folder that is mapped from the host needs to be the folder that contai
 
 ## Custom go2rtc version
 
-Frigate currently includes go2rtc v1.5.0, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.6.0, but there may be certain cases where you want to run a different version of go2rtc.
 
 To do this:
diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md
new file mode 100644
index 000000000..50fd0978f
--- /dev/null
+++ b/docs/docs/configuration/autotracking.md
@@ -0,0 +1,77 @@
+---
+id: autotracking
+title: Autotracking
+---
+
+An ONVIF-capable, PTZ (pan-tilt-zoom) camera that supports relative movement within the field of view (FOV) can be configured to automatically track moving objects and keep them in the center of the frame.
+
+## Autotracking behavior
+
+Once Frigate determines that an object is not a false positive and has entered one of the required zones, the autotracker will move the PTZ camera to keep the object centered in the frame until the object either moves out of the frame, the PTZ is not capable of any more movement, or Frigate loses track of it.
+
+Upon loss of tracking, Frigate will scan the region of the lost object for `timeout` seconds. If an object of the same type is found in that region, Frigate will autotrack that new object.
+
+When tracking has ended, Frigate will return to the camera preset specified by the `return_preset` configuration entry.
+
+## Checking ONVIF camera support
+
+Frigate autotracking functions with PTZ cameras capable of relative movement within the field of view (as specified in the [ONVIF spec](https://www.onvif.org/specs/srv/ptz/ONVIF-PTZ-Service-Spec-v1712.pdf) as `RelativePanTiltTranslationSpace` having a `TranslationSpaceFov` entry).
+
+Many cheaper or older PTZs may not support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported.
+
+Alternatively, you can download and run [this simple Python script](https://gist.github.com/hawkeye217/152a1d4ba80760dac95d46e143d37112), replacing the details on line 4 with your camera's IP address, ONVIF port, username, and password to check your camera.
+
+## Configuration
+
+First, set up a PTZ preset in your camera's firmware and give it a name.
+
+Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.
+
+An [ONVIF connection](cameras.md) is required for autotracking to function.
+
+Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT.
+
+```yaml
+cameras:
+  ptzcamera:
+    ...
+    onvif:
+      # Required: host of the camera being connected to.
+      host: 0.0.0.0
+      # Optional: ONVIF port for device (default: shown below).
+      port: 8000
+      # Optional: username for login.
+      # NOTE: Some devices require admin to access ONVIF.
+      user: admin
+      # Optional: password for login.
+      password: admin
+      # Optional: PTZ camera object autotracking. Keeps a moving object in
+      # the center of the frame by automatically moving the PTZ camera.
+      autotracking:
+        # Optional: enable/disable object autotracking. (default: shown below)
+        enabled: False
+        # Optional: list of objects to track from labelmap.txt (default: shown below)
+        track:
+          - person
+        # Required: Begin automatically tracking an object when it enters any of the listed zones.
+        required_zones:
+          - zone_name
+        # Required: Name of ONVIF camera preset to return to when tracking is over. (default: shown below)
+        return_preset: home
+        # Optional: Seconds to delay before returning to preset. (default: shown below)
+        timeout: 10
+```
+
+## Best practices and considerations
+
+Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR.
+
+The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases (especially for faster moving objects), the default 5 fps is insufficient for the motion estimator to perform accurately. 10 fps is the current recommendation. Higher frame rates will likely not be more performant and will only slow down Frigate and the motion estimator. Adjust your camera to output at least 10 frames per second and change the `fps` parameter in the [detect configuration](index.md) of your configuration file.
+
+A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. If Frigate already has trouble keeping track of your object, the autotracker will struggle as well.
+
+The autotracker will add PTZ motion requests to a queue while the motor is moving. Once the motor stops, the events in the queue will be executed together as one large move (rather than incremental moves). If your PTZ's motor is slow, you may not be able to reliably autotrack fast moving objects.
+
+## Usage applications
+
+In security and surveillance, it's common to use "spotter" cameras in combination with your PTZ. When your fixed spotter camera detects an object, you could use an automation platform like Home Assistant to move the PTZ to a specific preset so that Frigate can begin automatically tracking the object. For example: a residence may have fixed cameras on the east and west side of the property, capturing views up and down a street. When the spotter camera on the west side detects a person, a Home Assistant automation could move the PTZ to a camera preset aimed toward the west. When the object enters the specified zone, Frigate's autotracker could then continue to track the person as it moves out of view of any of the fixed cameras.
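As a companion to the capability check described in the new autotracking doc above, a minimal sketch of that check using the `onvif-zeep` package might look like the following; the host, port, and credentials are placeholders, and the attribute layout follows the ONVIF PTZ service schema:

```python
# Minimal sketch: ask a camera whether it reports the FOV-relative pan/tilt
# translation space that Frigate's autotracker requires.
# Assumes the onvif-zeep package; host/port/user/password are placeholders.
from onvif import ONVIFCamera

camera = ONVIFCamera("192.168.1.100", 8000, "admin", "password")
media = camera.create_media_service()
ptz = camera.create_ptz_service()

profile = media.GetProfiles()[0]
options = ptz.GetConfigurationOptions(
    {"ConfigurationToken": profile.PTZConfiguration.token}
)

# Autotracking needs RelativePanTiltTranslationSpace with a TranslationSpaceFov entry.
spaces = options.Spaces.RelativePanTiltTranslationSpace or []
supported = any("TranslationSpaceFov" in space.URI for space in spaces)
print(f"FOV-relative movement supported: {supported}")
```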
diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md
index eb704358f..09b3fe01f 100644
--- a/docs/docs/configuration/camera_specific.md
+++ b/docs/docs/configuration/camera_specific.md
@@ -141,7 +141,7 @@ go2rtc:
       - rtspx://192.168.1.1:7441/abcdefghijk
 ```
 
-[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-rtsp)
+[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#source-rtsp)
 
 In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect.
diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md
index 8f907cb3f..1804003a5 100644
--- a/docs/docs/configuration/cameras.md
+++ b/docs/docs/configuration/cameras.md
@@ -66,3 +66,5 @@ cameras:
 ```
 
 then PTZ controls will be available in the cameras WebUI.
+
+An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs.
diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md
index f23a32270..d00d1d1d6 100644
--- a/docs/docs/configuration/index.md
+++ b/docs/docs/configuration/index.md
@@ -145,6 +145,12 @@ audio:
   enabled: False
   # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
   max_not_heard: 30
+  # Optional: Configure the min rms volume required to run audio detection (default: shown below)
+  # As a rule of thumb:
+  #  - 200 - high sensitivity
+  #  - 500 - medium sensitivity
+  #  - 1000 - low sensitivity
+  min_volume: 500
   # Optional: Types of audio to listen for (default: shown below)
   listen:
     - bark
@@ -412,7 +418,7 @@ rtmp:
   enabled: False
 
 # Optional: Restream configuration
-# Uses https://github.com/AlexxIT/go2rtc (v1.5.0)
+# Uses https://github.com/AlexxIT/go2rtc (v1.6.0)
 go2rtc:
 
 # Optional: jsmpeg stream configuration for WebUI
@@ -555,6 +561,21 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
+      # Optional: PTZ camera object autotracking. Keeps a moving object in
+      # the center of the frame by automatically moving the PTZ camera.
+      autotracking:
+        # Optional: enable/disable object autotracking. (default: shown below)
+        enabled: False
+        # Optional: list of objects to track from labelmap.txt (default: shown below)
+        track:
+          - person
+        # Required: Begin automatically tracking an object when it enters any of the listed zones.
+        required_zones:
+          - zone_name
+        # Required: Name of ONVIF camera preset to return to when tracking is over.
+        return_preset: preset_name
+        # Optional: Seconds to delay before returning to preset. (default: shown below)
+        timeout: 10
 
 # Optional: Configuration for how to sort the cameras in the Birdseye view.
 birdseye:
diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md
index 697b11347..c66365241 100644
--- a/docs/docs/configuration/live.md
+++ b/docs/docs/configuration/live.md
@@ -115,4 +115,4 @@ services:
 
 :::
 
-See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#module-webrtc) for more information about this.
+See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#module-webrtc) for more information about this.
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index 3f48423bc..d684a2917 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -174,9 +174,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t
 
 ### Minimum Hardware Support
 
-The TensorRT detector uses the 11.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
-
-> **TODO:** NVidia claims support on compute 3.5 and 3.7, but marks it as deprecated. This would have some, but not all, Kepler GPUs as possibly working. This needs testing before making any claims of support.
+The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
 
 To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
 
@@ -192,22 +190,15 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben
 
 ### Generate Models
 
-The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is provided that will build several common models.
+The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.
 
-To generate model files, create a new folder to save the models, download the script, and launch a docker container that will run the script.
+The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
 
-```bash
-mkdir trt-models
-wget https://github.com/blakeblackshear/frigate/raw/master/docker/tensorrt_models.sh
-chmod +x tensorrt_models.sh
-docker run --gpus=all --rm -it -v `pwd`/trt-models:/tensorrt_models -v `pwd`/tensorrt_models.sh:/tensorrt_models.sh nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh
-```
+By default, the `yolov7-tiny-416` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
 
-The `trt-models` folder can then be mapped into your Frigate container as `trt-models` and the models referenced from the config.
+If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.
 
-If your GPU does not support FP16 operations, you can pass the environment variable `-e USE_FP16=False` to the `docker run` command to disable it.
-
-Specific models can be selected by passing an environment variable to the `docker run` command. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
+Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
 
 ```
 yolov3-288
@@ -237,11 +228,20 @@ yolov7x-640
 yolov7x-320
 ```
 
+An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this:
+
+```yml
+frigate:
+  environment:
+    - YOLO_MODELS="yolov4-608,yolov7x-640"
+    - USE_FP16=false
+```
+
 ### Configuration Parameters
 
 The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
 
-The TensorRT detector uses `.trt` model files that are located in `/trt-models/` by default. These model file path and dimensions used will depend on which model you have generated.
+The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
 
 ```yaml
 detectors:
@@ -250,7 +250,7 @@ detectors:
     device: 0 #This is the default, select the first GPU
 
 model:
-  path: /trt-models/yolov7-tiny-416.trt
+  path: /config/model_cache/tensorrt/yolov7-tiny-416.trt
   input_tensor: nchw
   input_pixel_format: rgb
   width: 416
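To make the relationship between `YOLO_MODELS` and the detector's `model.path` concrete, a small hypothetical helper (paths taken from the docs above) could enumerate the engine files Frigate is expected to produce:

```python
# Hypothetical helper: list the TensorRT engine paths expected for a given
# YOLO_MODELS value and flag any that still need to be generated on startup.
import os

yolo_models = "yolov4-608,yolov7x-640"  # same comma-separated format as the env var
cache_dir = "/config/model_cache/tensorrt"

for name in yolo_models.split(","):
    engine = os.path.join(cache_dir, f"{name.strip()}.trt")
    state = "found" if os.path.exists(engine) else "missing (generated on startup)"
    print(f"{engine}: {state}")
```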
diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md
index 61393a91c..914f65c33 100644
--- a/docs/docs/configuration/restream.md
+++ b/docs/docs/configuration/restream.md
@@ -7,7 +7,7 @@ title: Restream
 
 Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
 
-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.5.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#configuration) for more advanced configurations and features.
+Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.6.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#configuration) for more advanced configurations and features.
 
 :::note
 
@@ -134,7 +134,7 @@ cameras:
 
 ## Advanced Restream Configurations
 
-The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
 
 NOTE: The output will need to be passed with two curly braces `{{output}}`
diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md
index 36233ea68..5daf8fe3b 100644
--- a/docs/docs/frigate/hardware.md
+++ b/docs/docs/frigate/hardware.md
@@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known
 
 ### TensorRT
 
-The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
+The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
 
 Inference speeds will vary greatly depending on the GPU and the model used. `tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md
index 55adc48c7..08fa7dae6 100644
--- a/docs/docs/guides/configuring_go2rtc.md
+++ b/docs/docs/guides/configuring_go2rtc.md
@@ -10,7 +10,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
 
 # Setup a go2rtc stream
 
-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#module-streams), not just rtsp.
 
 ```yaml
 go2rtc:
@@ -23,7 +23,7 @@ The easiest live view to get working is MSE. After adding this to the config, re
 
 ### What if my video doesn't play?
 
-If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
+If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
 
 ```yaml
 go2rtc:
diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md
index 378eadb3e..43539d461 100644
--- a/docs/docs/integrations/mqtt.md
+++ b/docs/docs/integrations/mqtt.md
@@ -63,7 +63,9 @@ Message published for each changed event. The first message is published when th
       "stationary": false, // whether or not the object is considered stationary
       "motionless_count": 0, // number of frames the object has been motionless
       "position_changes": 2, // number of times the object has moved from a stationary position
-      "attributes": [], // set of unique attributes that have been identified on the object
+      "attributes": {
+        "face": 0.64
+      }, // attributes with top score that have been identified on the object at any point
       "current_attributes": [] // detailed data about the current attributes in this frame
     },
     "after": {
@@ -90,13 +92,15 @@ Message published for each changed event. The first message is published when th
       "stationary": false, // whether or not the object is considered stationary
       "motionless_count": 0, // number of frames the object has been motionless
       "position_changes": 2, // number of times the object has changed position
-      "attributes": ["face"], // set of unique attributes that have been identified on the object
+      "attributes": {
+        "face": 0.86
+      }, // attributes with top score that have been identified on the object at any point
       "current_attributes": [ // detailed data about the current attributes in this frame
         {
           "label": "face",
          "box": [442, 506, 534, 524],
-          "score": 0.64
+          "score": 0.86
        }
      ]
    }
@@ -188,3 +192,11 @@ Topic to send PTZ commands to camera.
 | `MOVE_<dir>` | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT] |
 | `ZOOM_<dir>` | send command to continuously zoom `<dir>`, possible values are [IN, OUT] |
 | `STOP` | send command to stop moving |
+
+### `frigate/<camera_name>/ptz_autotracker/set`
+
+Topic to turn the PTZ autotracker for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/ptz_autotracker/state`
+
+Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.
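As a usage illustration for the autotracker topics documented above, toggling autotracking from an external MQTT client could look like the following sketch; it assumes the `paho-mqtt` package, and the broker host and camera name are placeholders:

```python
# Sketch: turn Frigate's PTZ autotracker on or off over MQTT.
# Assumes paho-mqtt; broker host and camera name are placeholders.
import paho.mqtt.publish as publish

BROKER = "mqtt.local"
CAMERA = "ptzcamera"

# Expected payloads are "ON" and "OFF".
publish.single(f"frigate/{CAMERA}/ptz_autotracker/set", "OFF", hostname=BROKER)

# Frigate publishes the retained state on frigate/<camera_name>/ptz_autotracker/state.
```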
diff --git a/frigate/app.py b/frigate/app.py
index df0a242e5..fb0fe3601 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -27,6 +27,7 @@ from frigate.const import (
     CLIPS_DIR,
     CONFIG_DIR,
     DEFAULT_DB_PATH,
+    DEFAULT_QUEUE_BUFFER_SIZE,
     EXPORT_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
@@ -42,13 +43,13 @@ from frigate.object_detection import ObjectDetectProcess
 from frigate.object_processing import TrackedObjectProcessor
 from frigate.output import output_frames
 from frigate.plus import PlusApi
-from frigate.ptz import OnvifController
+from frigate.ptz.autotrack import PtzAutoTrackerThread
+from frigate.ptz.onvif import OnvifController
 from frigate.record.record import manage_recordings
 from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
-from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
-from frigate.util.builtin import LimitedQueue as LQueue
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog
@@ -67,6 +68,7 @@ class FrigateApp:
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
         self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
+        self.ptz_metrics: dict[str, PTZMetricsTypes] = {}
         self.processes: dict[str, int] = {}
 
     def set_environment_vars(self) -> None:
@@ -159,10 +161,27 @@ class FrigateApp:
                 "ffmpeg_pid": mp.Value("i", 0),  # type: ignore[typeddict-item]
                 # issue https://github.com/python/typeshed/issues/8799
                 # from mypy 0.981 onwards
-                "frame_queue": LQueue(maxsize=2),
+                "frame_queue": mp.Queue(maxsize=2),
                 "capture_process": None,
                 "process": None,
             }
+            self.ptz_metrics[camera_name] = {
+                "ptz_autotracker_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].onvif.autotracking.enabled,
+                ),
+                "ptz_stopped": mp.Event(),
+                "ptz_reset": mp.Event(),
+                "ptz_start_time": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "ptz_stop_time": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+            }
+            self.ptz_metrics[camera_name]["ptz_stopped"].set()
             self.feature_metrics[camera_name] = {
                 "audio_enabled": mp.Value(  # type: ignore[typeddict-item]
                     # issue https://github.com/python/typeshed/issues/8799
@@ -191,22 +210,22 @@ class FrigateApp:
 
     def init_queues(self) -> None:
         # Queues for clip processing
-        self.event_queue: Queue = ff.Queue()
-        self.event_processed_queue: Queue = ff.Queue()
-        self.video_output_queue: Queue = LQueue(
+        self.event_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
+        self.event_processed_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
+        self.video_output_queue: Queue = mp.Queue(
             maxsize=len(self.config.cameras.keys()) * 2
         )
 
         # Queue for cameras to push tracked objects to
-        self.detected_frames_queue: Queue = LQueue(
+        self.detected_frames_queue: Queue = mp.Queue(
             maxsize=len(self.config.cameras.keys()) * 2
         )
 
         # Queue for recordings info
-        self.recordings_info_queue: Queue = ff.Queue()
+        self.recordings_info_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
 
         # Queue for timeline events
-        self.timeline_queue: Queue = ff.Queue()
+        self.timeline_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
 
         # Queue for inter process communication
         self.inter_process_queue: Queue = ff.Queue()
@@ -317,7 +336,7 @@ class FrigateApp:
         )
 
     def init_onvif(self) -> None:
-        self.onvif_controller = OnvifController(self.config)
+        self.onvif_controller = OnvifController(self.config, self.ptz_metrics)
 
     def init_dispatcher(self) -> None:
         comms: list[Communicator] = []
@@ -333,6 +352,7 @@ class FrigateApp:
             self.onvif_controller,
             self.camera_metrics,
             self.feature_metrics,
+            self.ptz_metrics,
             comms,
         )
 
@@ -373,6 +393,15 @@ class FrigateApp:
                 detector_config,
             )
 
+    def start_ptz_autotracker(self) -> None:
+        self.ptz_autotracker_thread = PtzAutoTrackerThread(
+            self.config,
+            self.onvif_controller,
+            self.ptz_metrics,
+            self.stop_event,
+        )
+        self.ptz_autotracker_thread.start()
+
     def start_detected_frames_processor(self) -> None:
         self.detected_frames_processor = TrackedObjectProcessor(
             self.config,
@@ -382,6 +411,7 @@ class FrigateApp:
             self.event_processed_queue,
             self.video_output_queue,
             self.recordings_info_queue,
+            self.ptz_autotracker_thread,
             self.stop_event,
         )
         self.detected_frames_processor.start()
@@ -418,6 +448,7 @@ class FrigateApp:
                     self.detection_out_events[name],
                     self.detected_frames_queue,
                     self.camera_metrics[name],
+                    self.ptz_metrics[name],
                 ),
             )
             camera_process.daemon = True
@@ -551,6 +582,7 @@ class FrigateApp:
             sys.exit(1)
         self.start_detectors()
         self.start_video_output_processor()
+        self.start_ptz_autotracker()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.start_camera_capture_processes()
@@ -595,6 +627,7 @@ class FrigateApp:
         self.dispatcher.stop()
         self.detected_frames_processor.join()
+        self.ptz_autotracker_thread.join()
         self.event_processor.join()
         self.event_cleanup.join()
         self.stats_emitter.join()
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index c65f3343f..f2fe40c5d 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -5,8 +5,8 @@ from abc import ABC, abstractmethod
 from typing import Any, Callable
 
 from frigate.config import FrigateConfig
-from frigate.ptz import OnvifCommandEnum, OnvifController
-from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
+from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes
 from frigate.util.services import restart_frigate
 
 logger = logging.getLogger(__name__)
@@ -40,12 +40,14 @@ class Dispatcher:
         onvif: OnvifController,
         camera_metrics: dict[str, CameraMetricsTypes],
         feature_metrics: dict[str, FeatureMetricsTypes],
+        ptz_metrics: dict[str, PTZMetricsTypes],
         communicators: list[Communicator],
     ) -> None:
         self.config = config
         self.onvif = onvif
         self.camera_metrics = camera_metrics
         self.feature_metrics = feature_metrics
+        self.ptz_metrics = ptz_metrics
         self.comms = communicators
 
         for comm in self.comms:
@@ -55,6 +57,7 @@ class Dispatcher:
             "audio": self._on_audio_command,
             "detect": self._on_detect_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
+            "ptz_autotracker": self._on_ptz_autotracker_command,
             "motion": self._on_motion_command,
             "motion_contour_area": self._on_motion_contour_area_command,
             "motion_threshold": self._on_motion_threshold_command,
@@ -161,6 +164,23 @@
 
         self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)
 
+    def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
+        """Callback for ptz_autotracker topic."""
+        ptz_autotracker_settings = self.config.cameras[camera_name].onvif.autotracking
+
+        if payload == "ON":
+            if not self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
+                logger.info(f"Turning on ptz autotracker for {camera_name}")
+                self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = True
+                ptz_autotracker_settings.enabled = True
+        elif payload == "OFF":
+            if self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
+                logger.info(f"Turning off ptz autotracker for {camera_name}")
+                self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = False
+                ptz_autotracker_settings.enabled = False
+
+        self.publish(f"{camera_name}/ptz_autotracker/state", payload, retain=True)
+
     def _on_motion_contour_area_command(self, camera_name: str, payload: int) -> None:
         """Callback for motion contour topic."""
         try:
diff --git a/frigate/comms/mqtt.py b/frigate/comms/mqtt.py
index 2859a04a2..76c4f28af 100644
--- a/frigate/comms/mqtt.py
+++ b/frigate/comms/mqtt.py
@@ -69,6 +69,11 @@ class MqttClient(Communicator):  # type: ignore[misc]
                 "ON" if camera.motion.improve_contrast else "OFF",  # type: ignore[union-attr]
                 retain=True,
             )
+            self.publish(
+                f"{camera_name}/ptz_autotracker/state",
+                "ON" if camera.onvif.autotracking.enabled else "OFF",
+                retain=True,
+            )
             self.publish(
                 f"{camera_name}/motion_threshold/state",
                 camera.motion.threshold,  # type: ignore[union-attr]
@@ -152,6 +157,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "audio",
             "motion",
             "improve_contrast",
+            "ptz_autotracker",
             "motion_threshold",
             "motion_contour_area",
         ]
diff --git a/frigate/config.py b/frigate/config.py
index 9399320fe..11ef1b9ee 100644
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -128,11 +128,31 @@ class MqttConfig(FrigateBaseModel):
         return v
 
 
+class PtzAutotrackConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
+    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    required_zones: List[str] = Field(
+        default_factory=list,
+        title="List of required zones to be entered in order to begin autotracking.",
+    )
+    return_preset: str = Field(
+        default="home",
+        title="Name of camera preset to return to when object tracking is over.",
+    )
+    timeout: int = Field(
+        default=10, title="Seconds to delay before returning to preset."
+    )
+
+
 class OnvifConfig(FrigateBaseModel):
     host: str = Field(default="", title="Onvif Host")
     port: int = Field(default=8000, title="Onvif Port")
     user: Optional[str] = Field(title="Onvif Username")
     password: Optional[str] = Field(title="Onvif Password")
+    autotracking: PtzAutotrackConfig = Field(
+        default_factory=PtzAutotrackConfig,
+        title="PTZ auto tracking config.",
+    )
 
 
 class RetainModeEnum(str, Enum):
@@ -393,6 +413,9 @@ class AudioConfig(FrigateBaseModel):
     max_not_heard: int = Field(
         default=30, title="Seconds of not hearing the type of audio to end the event."
     )
+    min_volume: int = Field(
+        default=500, title="Min volume required to run audio detection."
+    )
     listen: List[str] = Field(
         default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
     )
@@ -892,6 +915,17 @@ def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
         )
 
 
+def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
+    """Verify that required_zones are specified when autotracking is enabled."""
+    if (
+        camera_config.onvif.autotracking.enabled
+        and not camera_config.onvif.autotracking.required_zones
+    ):
+        raise ValueError(
+            f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
+        )
+
+
 class FrigateConfig(FrigateBaseModel):
     mqtt: MqttConfig = Field(title="MQTT Configuration.")
     database: DatabaseConfig = Field(
@@ -1067,6 +1101,7 @@ class FrigateConfig(FrigateBaseModel):
             verify_recording_retention(camera_config)
             verify_recording_segments_setup_with_reasonable_time(camera_config)
             verify_zone_objects_are_tracked(camera_config)
+            verify_autotrack_zones(camera_config)
 
             if camera_config.rtmp.enabled:
                 logger.warning(
diff --git a/frigate/const.py b/frigate/const.py
index b6b0e44bd..c508a83bf 100644
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -46,3 +46,7 @@ DRIVER_INTEL_iHD = "iHD"
 
 MAX_SEGMENT_DURATION = 600
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times
+
+# Queue Values
+
+DEFAULT_QUEUE_BUFFER_SIZE = 2000 * 1000  # 2MB
diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py
index ca03d483b..ac67626a2 100644
--- a/frigate/detectors/plugins/edgetpu_tfl.py
+++ b/frigate/detectors/plugins/edgetpu_tfl.py
@@ -27,14 +27,17 @@ class EdgeTpuTfl(DetectionApi):
     type_key = DETECTOR_KEY
 
     def __init__(self, detector_config: EdgeTpuDetectorConfig):
-        device_config = {"device": "usb"}
+        device_config = {}
         if detector_config.device is not None:
             device_config = {"device": detector_config.device}
 
         edge_tpu_delegate = None
 
         try:
-            logger.info(f"Attempting to load TPU as {device_config['device']}")
+            device_type = (
+                device_config["device"] if "device" in device_config else "auto"
+            )
+            logger.info(f"Attempting to load TPU as {device_type}")
             edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
             logger.info("TPU found")
             self.interpreter = Interpreter(
diff --git a/frigate/detectors/plugins/tensorrt.py b/frigate/detectors/plugins/tensorrt.py
index 7251b8751..dea3fe078 100644
--- a/frigate/detectors/plugins/tensorrt.py
+++ b/frigate/detectors/plugins/tensorrt.py
@@ -78,7 +78,7 @@ class TensorRtDetector(DetectionApi):
         try:
             trt.init_libnvinfer_plugins(self.trt_logger, "")
 
-            ctypes.cdll.LoadLibrary("/trt-models/libyolo_layer.so")
+            ctypes.cdll.LoadLibrary("/usr/local/lib/libyolo_layer.so")
         except OSError as e:
             logger.error(
                 "ERROR: failed to load libraries. %s",
diff --git a/frigate/events/audio.py b/frigate/events/audio.py
index 2fbb54ce8..631c4349f 100644
--- a/frigate/events/audio.py
+++ b/frigate/events/audio.py
@@ -7,7 +7,7 @@ import os
 import signal
 import threading
 from types import FrameType
-from typing import Optional
+from typing import Optional, Tuple
 
 import numpy as np
 import requests
@@ -176,20 +176,22 @@ class AudioEventMaintainer(threading.Thread):
             return
 
         audio_as_float = audio.astype(np.float32)
-        waveform = audio_as_float / AUDIO_MAX_BIT_RANGE
-        model_detections = self.detector.detect(waveform)
+        rms, _ = self.calculate_audio_levels(audio_as_float)
 
-        self.calculate_audio_levels(audio_as_float)
+        # only run audio detection when volume is above min_volume
+        if rms >= self.config.audio.min_volume:
+            waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
+            model_detections = self.detector.detect(waveform)
 
-        for label, score, _ in model_detections:
-            if label not in self.config.audio.listen:
-                continue
+            for label, score, _ in model_detections:
+                if label not in self.config.audio.listen:
+                    continue
 
-            self.handle_detection(label, score)
+                self.handle_detection(label, score)
 
         self.expire_detections()
 
-    def calculate_audio_levels(self, audio_as_float: np.float32) -> None:
+    def calculate_audio_levels(self, audio_as_float: np.float32) -> Tuple[float, float]:
         # Calculate RMS (Root-Mean-Square) which represents the average signal amplitude
         # Note: np.float32 isn't serializable, we must use np.float64 to publish the message
         rms = np.sqrt(np.mean(np.absolute(audio_as_float**2)))
@@ -204,6 +206,8 @@ class AudioEventMaintainer(threading.Thread):
                 (f"{self.config.name}/audio/rms", float(rms))
             )
 
+        return float(rms), float(dBFS)
+
     def handle_detection(self, label: str, score: float) -> None:
         if self.detections.get(label):
             self.detections[label][
@@ -216,7 +220,7 @@ class AudioEventMaintainer(threading.Thread):
             )
 
             if resp.status_code == 200:
-                event_id = resp.json()[0]["event_id"]
+                event_id = resp.json()["event_id"]
                 self.detections[label] = {
                     "id": event_id,
                     "label": label,
diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py
index 9640128e1..d92bb0a44 100644
--- a/frigate/events/maintainer.py
+++ b/frigate/events/maintainer.py
@@ -199,7 +199,8 @@ class EventProcessor(threading.Thread):
 
         # only overwrite the sub_label in the database if it's set
         if event_data.get("sub_label") is not None:
-            event[Event.sub_label] = event_data["sub_label"]
+            event[Event.sub_label] = event_data["sub_label"][0]
+            event[Event.data]["sub_label_score"] = event_data["sub_label"][1]
 
         (
             Event.insert(event)
diff --git a/frigate/http.py b/frigate/http.py
index fe6dc54ef..95fd25502 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -24,21 +24,26 @@ from flask import (
     make_response,
     request,
 )
-from peewee import DoesNotExist, SqliteDatabase, fn, operator
+from peewee import DoesNotExist, fn, operator
 from playhouse.shortcuts import model_to_dict
+from playhouse.sqliteq import SqliteQueueDatabase
 from tzlocal import get_localzone_name
 
 from frigate.config import FrigateConfig
-from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
+from frigate.const import CLIPS_DIR, CONFIG_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
 from frigate.events.external import ExternalEventProcessor
 from frigate.models import Event, Recordings, Timeline
 from frigate.object_processing import TrackedObject
 from frigate.plus import PlusApi
-from frigate.ptz import OnvifController
+from frigate.ptz.onvif import OnvifController
 from frigate.record.export import PlaybackFactorEnum, RecordingExporter
 from frigate.stats import stats_snapshot
 from frigate.storage import StorageMaintainer
-from frigate.util.builtin import clean_camera_user_pass, get_tz_modifiers
+from frigate.util.builtin import (
+    clean_camera_user_pass,
+    get_tz_modifiers,
+    update_yaml_from_url,
+)
 from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel
 from frigate.version import VERSION
 
@@ -49,7 +54,7 @@ bp = Blueprint("frigate", __name__)
 
 def create_app(
     frigate_config,
-    database: SqliteDatabase,
+    database: SqliteQueueDatabase,
     stats_tracking,
     detected_frames_processor,
     storage_maintainer: StorageMaintainer,
@@ -415,8 +420,8 @@ def get_labels():
         else:
             events = Event.select(Event.label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get labels: {e}"}), 404
         )
 
     labels = sorted([e.label for e in events])
@@ -430,8 +435,9 @@ def get_sub_labels():
     try:
         events = Event.select(Event.sub_label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get sub_labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get sub_labels: {e}"}),
+            404,
        )
 
     sub_labels = [e.sub_label for e in events]
@@ -864,12 +870,17 @@ def events():
 
 
@bp.route("/events//