diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh
index 9c5dec5bb..1a1832f3b 100755
--- a/.devcontainer/post_create.sh
+++ b/.devcontainer/post_create.sh
@@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
 sudo mkdir -p /media/frigate
 sudo chown -R "$(id -u):$(id -g)" /media/frigate
 
+# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
+# s6 service file. For dev, where frigate is started from an interactive
+# shell, we define it in .bashrc instead.
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
+
 make version
 
 cd web
diff --git a/Dockerfile b/Dockerfile
index d1dbd0755..660cb5b25 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -262,15 +262,34 @@ FROM deps AS frigate
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
 
+# Build TensorRT-specific library
+FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps
+
+RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
+    /tensorrt_libyolo.sh
+
 # Frigate w/ TensorRT Support as separate image
 FROM frigate AS frigate-tensorrt
+
+# Disable S6 Global timeout
+ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
+
+ENV TRT_VER=8.5.3
+ENV YOLO_MODELS="yolov7-tiny-416"
+
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
+
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
-    ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
     ldconfig
 
 # Dev Container w/ TRT
 FROM devcontainer AS devcontainer-trt
+COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
+COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY docker/support/tensorrt_detector/rootfs/ /
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl
 
diff --git a/docker/build_nginx.sh b/docker/build_nginx.sh
index 1e7bfad21..56c9a146d 100755
--- a/docker/build_nginx.sh
+++ b/docker/build_nginx.sh
@@ -2,10 +2,10 @@
 
 set -euxo pipefail
 
-NGINX_VERSION="1.22.1"
-VOD_MODULE_VERSION="1.30"
-SECURE_TOKEN_MODULE_VERSION="1.4"
-RTMP_MODULE_VERSION="1.2.1"
+NGINX_VERSION="1.25.1"
+VOD_MODULE_VERSION="1.31"
+SECURE_TOKEN_MODULE_VERSION="1.5"
+RTMP_MODULE_VERSION="1.2.2"
 
 cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
 sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
diff --git a/docker/install_deps.sh b/docker/install_deps.sh
index 25b6951b5..7d5242d83 100755
--- a/docker/install_deps.sh
+++ b/docker/install_deps.sh
@@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
         libva-drm2 mesa-va-drivers
 fi
 
-apt-get purge gnupg apt-transport-https wget xz-utils -y
+apt-get purge gnupg apt-transport-https xz-utils -y
 apt-get clean autoclean -y
 apt-get autoremove --purge -y
 rm -rf /var/lib/apt/lists/*
diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
index 0a835550e..f2cc40fcf 100755
--- a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
+++ b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run
@@ -44,6 +44,7 @@ function migrate_db_path() {
 echo "[INFO] Preparing Frigate..."
 migrate_db_path
+export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
 
 echo "[INFO] Starting Frigate..."
 
diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run
index 85c8f9526..fd5fcb568 100755
--- a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run
+++ b/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run
@@ -43,6 +43,8 @@ function get_ip_and_port_from_supervisor() {
     export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
 }
 
+export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
+
 if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Preparing go2rtc config..."
 
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py
index 1397adee8..0531b173d 100644
--- a/docker/rootfs/usr/local/go2rtc/create_config.py
+++ b/docker/rootfs/usr/local/go2rtc/create_config.py
@@ -7,7 +7,7 @@ import sys
 import yaml
 
 sys.path.insert(0, "/opt/frigate")
-from frigate.const import BIRDSEYE_PIPE, BTBN_PATH  # noqa: E402
+from frigate.const import BIRDSEYE_PIPE  # noqa: E402
 from frigate.ffmpeg_presets import (  # noqa: E402
     parse_preset_hardware_acceleration_encode,
 )
@@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None:
     go2rtc_config["rtsp"]["default_query"] = "mp4"
 
 # need to replace ffmpeg command when using ffmpeg4
-if not os.path.exists(BTBN_PATH):
+if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
     if go2rtc_config.get("ffmpeg") is None:
         go2rtc_config["ffmpeg"] = {
             "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
diff --git a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
similarity index 94%
rename from docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
rename to docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
index d4248d047..fe16ed9c5 100644
--- a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
+++ b/docker/support/tensorrt_detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf
@@ -1,3 +1,4 @@
+/usr/local/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
 /usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base
new file mode 100644
index 000000000..e69de29bb
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
new file mode 100755
index 000000000..5f0e43553
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
@@ -0,0 +1,53 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Generate models for the TensorRT detector
+
+set -o errexit -o nounset -o pipefail
+
+MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
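+# The generated engines are cached per TensorRT version: .trt files are written
+# to ${MODEL_CACHE_DIR}/${TRT_VER}, and un-versioned symlinks to them are created
+# in ${MODEL_CACHE_DIR}, so bumping TRT_VER triggers a rebuild of the models.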
+OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
+
+# Create output folder
+mkdir -p ${OUTPUT_FOLDER}
+
+FIRST_MODEL=true
+MODEL_CONVERT=""
+
+for model in ${YOLO_MODELS//,/ }
+do
+    # Remove old link in case path/version changed
+    rm -f ${MODEL_CACHE_DIR}/${model}.trt
+
+    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
+        if [[ ${FIRST_MODEL} = true ]]; then
+            MODEL_CONVERT="${model}"
+            FIRST_MODEL=false;
+        else
+            MODEL_CONVERT+=",${model}";
+        fi
+    else
+        ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+    fi
+done
+
+if [[ -z ${MODEL_CONVERT} ]]; then
+    echo "No models to convert."
+    exit 0
+fi
+
+echo "Generating the following TRT Models: ${MODEL_CONVERT}"
+
+# Build trt engine
+cd /usr/local/src/tensorrt_demos/yolo
+
+# Download yolo weights
+./download_yolo.sh $MODEL_CONVERT > /dev/null
+
+for model in ${MODEL_CONVERT//,/ }
+do
+    echo "Converting ${model} model"
+    python3 yolo_to_onnx.py -m ${model} > /dev/null
+    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
+    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
+    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
+done
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
new file mode 100644
index 000000000..bdd22a185
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type
@@ -0,0 +1 @@
+oneshot
diff --git a/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
new file mode 100644
index 000000000..b9de40ad0
--- /dev/null
+++ b/docker/support/tensorrt_detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up
@@ -0,0 +1 @@
+/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
diff --git a/docker/support/tensorrt_detector/tensorrt_libyolo.sh b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
new file mode 100755
index 000000000..e6fc415e5
--- /dev/null
+++ b/docker/support/tensorrt_detector/tensorrt_libyolo.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+SCRIPT_DIR="/usr/local/src/tensorrt_demos"
+
+# Clone tensorrt_demos repo
+git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download
+
+# Build libyolo
+cd ./tensorrt_demos/plugins && make all
+cp libyolo_layer.so /usr/local/lib/libyolo_layer.so
+
+# Store yolo scripts for later conversion
+cd ../
+mkdir -p ${SCRIPT_DIR}/plugins
+cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so
+cp -a yolo ${SCRIPT_DIR}/
diff --git a/docker/tensorrt_models.sh b/docker/tensorrt_models.sh
deleted file mode 100755
index 957e817d6..000000000
--- a/docker/tensorrt_models.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-set -euxo pipefail
-
-CUDA_HOME=/usr/local/cuda
-LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
-OUTPUT_FOLDER=/tensorrt_models
-echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"
-
-# Create output folder
-mkdir -p ${OUTPUT_FOLDER}
-
-# Install packages
-pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3
-
-# Clone tensorrt_demos repo
-git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos
-
-# Build libyolo
-cd /tensorrt_demos/plugins && make all
-cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so
-
-# Download yolo weights
-cd /tensorrt_demos/yolo && ./download_yolo.sh
-
-# Build trt engine
-cd /tensorrt_demos/yolo
-
-for model in ${YOLO_MODELS//,/ }
-do
-    python3 yolo_to_onnx.py -m ${model}
-    python3 onnx_to_tensorrt.py -m ${model}
-    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
-done
diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md
new file mode 100644
index 000000000..b1a2654eb
--- /dev/null
+++ b/docs/docs/configuration/autotracking.md
@@ -0,0 +1,71 @@
+---
+id: autotracking
+title: Autotracking
+---
+
+An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame.
+
+## Autotracking behavior
+
+Once Frigate determines that an object is not a false positive and has entered one of the required zones, the autotracker will move the PTZ camera to keep the object centered in the frame until the object either moves out of the frame, the PTZ is not capable of any more movement, or Frigate loses track of it.
+
+Upon loss of tracking, Frigate will scan the region of the lost object for `timeout` seconds. If an object of the same type is found in that region, Frigate will track that new object.
+
+When tracking has ended, Frigate will return to the camera preset specified by the `return_preset` configuration entry.
+
+## Checking ONVIF camera support
+
+Frigate autotracking functions with PTZ cameras capable of relative movement within the field of view (as specified in the [ONVIF spec](https://www.onvif.org/specs/srv/ptz/ONVIF-PTZ-Service-Spec-v1712.pdf) as `RelativePanTiltTranslationSpace` having a `TranslationSpaceFov` entry).
+
+Many cheaper PTZs likely don't support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported.
+
+Alternatively, you can download and run [this simple Python script](https://gist.github.com/hawkeye217/152a1d4ba80760dac95d46e143d37112), replacing the details on line 4 with your camera's IP address, ONVIF port, username, and password to check your camera.
+
+## Configuration
+
+First, configure the ONVIF parameters for your camera, then specify the object types to track, a required zone the object must enter, and a camera preset name to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.
+
+An [ONVIF connection](cameras.md) is required for autotracking to function.
+
+Note that `autotracking` is disabled by default but can be enabled in the configuration or via MQTT.
+
+```yaml
+cameras:
+  ptzcamera:
+    ...
+    onvif:
+      # Required: host of the camera being connected to.
+      host: 0.0.0.0
+      # Optional: ONVIF port for device (default: shown below).
+      port: 8000
+      # Optional: username for login.
+      # NOTE: Some devices require admin to access ONVIF.
+      user: admin
+      # Optional: password for login.
+      password: admin
+      # Optional: PTZ camera object autotracking. Keeps a moving object in
+      # the center of the frame by automatically moving the PTZ camera.
+      autotracking:
+        # Optional: enable/disable object autotracking. (default: shown below)
+        enabled: False
+        # Optional: list of objects to track from labelmap.txt (default: shown below)
+        track:
+          - person
+        # Required: Begin automatically tracking an object when it enters any of the listed zones.
+        required_zones:
+          - zone_name
+        # Required: Name of ONVIF camera preset to return to when tracking is over. (default: shown below)
+        return_preset: home
+        # Optional: Seconds to delay before returning to preset. (default: shown below)
+        timeout: 10
+```
+
+## Best practices and considerations
+
+Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR.
+
+The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases (especially for faster-moving objects), the default 5 fps is insufficient for the motion estimator to perform accurately. 10 fps is the current recommendation. Higher frame rates will likely not be more performant and will only slow down Frigate and the motion estimator. Adjust your camera to output at least 10 frames per second and change the `fps` parameter in the [detect configuration](index.md) of your configuration file.
+
+A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. If Frigate already has trouble keeping track of your object, the autotracker will struggle as well.
+
+The autotracker will add PTZ motion requests to a queue while the motor is moving. Once the motor stops, the events in the queue will be executed together as one large move (rather than incremental moves). If your PTZ's motor is slow, you may not be able to reliably autotrack fast-moving objects.
diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md
index 8f907cb3f..1804003a5 100644
--- a/docs/docs/configuration/cameras.md
+++ b/docs/docs/configuration/cameras.md
@@ -66,3 +66,5 @@ cameras:
 ```
 
 then PTZ controls will be available in the cameras WebUI.
+
+An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs.
diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md
index 2228a5047..5e5421e3a 100644
--- a/docs/docs/configuration/index.md
+++ b/docs/docs/configuration/index.md
@@ -145,6 +145,12 @@ audio:
   enabled: False
   # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
   max_not_heard: 30
+  # Optional: Configure the min rms volume required to run audio detection (default: shown below)
+  # As a rule of thumb:
+  #  - 200 - high sensitivity
+  #  - 500 - medium sensitivity
+  #  - 1000 - low sensitivity
+  min_volume: 500
   # Optional: Types of audio to listen for (default: shown below)
   listen:
     - bark
@@ -555,6 +561,21 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
+      # Optional: PTZ camera object autotracking. Keeps a moving object in
+      # the center of the frame by automatically moving the PTZ camera.
+      autotracking:
+        # Optional: enable/disable object autotracking. (default: shown below)
+        enabled: False
+        # Optional: list of objects to track from labelmap.txt (default: shown below)
+        track:
+          - person
+        # Required: Begin automatically tracking an object when it enters any of the listed zones.
+        required_zones:
+          - zone_name
+        # Required: Name of ONVIF camera preset to return to when tracking is over.
+        return_preset: preset_name
+        # Optional: Seconds to delay before returning to preset. (default: shown below)
+        timeout: 10
 
 # Optional: Configuration for how to sort the cameras in the Birdseye view.
 birdseye:
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index 3f48423bc..d684a2917 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -174,9 +174,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t
 
 ### Minimum Hardware Support
 
-The TensorRT detector uses the 11.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
-
-> **TODO:** NVidia claims support on compute 3.5 and 3.7, but marks it as deprecated. This would have some, but not all, Kepler GPUs as possibly working. This needs testing before making any claims of support.
+The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below.
 
 To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
 
@@ -192,22 +190,15 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben
 
 ### Generate Models
 
-The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is provided that will build several common models.
+The model used for TensorRT must be preprocessed on the same hardware platform that it will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.
 
-To generate model files, create a new folder to save the models, download the script, and launch a docker container that will run the script.
+The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
 
-```bash
-mkdir trt-models
-wget https://github.com/blakeblackshear/frigate/raw/master/docker/tensorrt_models.sh
-chmod +x tensorrt_models.sh
-docker run --gpus=all --rm -it -v `pwd`/trt-models:/tensorrt_models -v `pwd`/tensorrt_models.sh:/tensorrt_models.sh nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh
-```
+By default, the `yolov7-tiny-416` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
 
-The `trt-models` folder can then be mapped into your Frigate container as `trt-models` and the models referenced from the config.
+If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.
 
-If your GPU does not support FP16 operations, you can pass the environment variable `-e USE_FP16=False` to the `docker run` command to disable it.
-
-Specific models can be selected by passing an environment variable to the `docker run` command. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
+Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
 
 ```
 yolov3-288
@@ -237,11 +228,20 @@ yolov7x-640
 yolov7x-320
 ```
 
+An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this:
+
+```yml
+frigate:
+  environment:
+    - YOLO_MODELS=yolov4-608,yolov7x-640
+    - USE_FP16=false
+```
+
 ### Configuration Parameters
 
 The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
 
-The TensorRT detector uses `.trt` model files that are located in `/trt-models/` by default. These model file path and dimensions used will depend on which model you have generated.
+The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
 
 ```yaml
 detectors:
   tensorrt:
     type: tensorrt
     device: 0 #This is the default, select the first GPU
 
 model:
-  path: /trt-models/yolov7-tiny-416.trt
+  path: /config/model_cache/tensorrt/yolov7-tiny-416.trt
   input_tensor: nchw
   input_pixel_format: rgb
   width: 416
diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md
index 36233ea68..5daf8fe3b 100644
--- a/docs/docs/frigate/hardware.md
+++ b/docs/docs/frigate/hardware.md
@@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known
 
 ### TensorRT
 
-The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
+The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
 
 Inference speeds will vary greatly depending on the GPU and the model used.
 `tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md
index 229285676..43539d461 100644
--- a/docs/docs/integrations/mqtt.md
+++ b/docs/docs/integrations/mqtt.md
@@ -63,7 +63,9 @@ Message published for each changed event. The first message is published when th
     "stationary": false, // whether or not the object is considered stationary
     "motionless_count": 0, // number of frames the object has been motionless
     "position_changes": 2, // number of times the object has moved from a stationary position
-    "attributes": [], // set of unique attributes that have been identified on the object
+    "attributes": {
+      "face": 0.64
+    }, // attributes with top score that have been identified on the object at any point
     "current_attributes": [] // detailed data about the current attributes in this frame
   },
   "after": {
@@ -90,13 +92,15 @@ Message published for each changed event. The first message is published when th
     "stationary": false, // whether or not the object is considered stationary
     "motionless_count": 0, // number of frames the object has been motionless
     "position_changes": 2, // number of times the object has changed position
-    "attributes": ["face"], // set of unique attributes that have been identified on the object
+    "attributes": {
+      "face": 0.86
+    }, // attributes with top score that have been identified on the object at any point
     "current_attributes": [ // detailed data about the current attributes in this frame
       {
         "label": "face",
        "box": [442, 506, 534, 524],
-        "score": 0.64
+        "score": 0.86
       }
     ]
   }
@@ -184,7 +188,15 @@ Topic to send PTZ commands to camera.
 
 | Command                | Description                                                                                |
 | ---------------------- | ------------------------------------------------------------------------------------------ |
-| `preset-<preset_name>` | send command to move to preset with name `<preset_name>`                                   |
+| `preset_<preset_name>` | send command to move to preset with name `<preset_name>`                                   |
 | `MOVE_<dir>`           | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT]  |
 | `ZOOM_<dir>`           | send command to continuously zoom `<dir>`, possible values are [IN, OUT]                   |
 | `STOP`                 | send command to stop moving                                                                |
+
+### `frigate/<camera_name>/ptz_autotracker/set`
+
+Topic to turn the PTZ autotracker for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/ptz_autotracker/state`
+
+Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.
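+
+As a usage sketch (assuming the default `frigate` topic prefix, a broker reachable at `mqtt.local`, and a camera named `ptzcamera`; adjust these to your setup):
+
+```bash
+# enable autotracking for the camera, then watch Frigate confirm the state
+mosquitto_pub -h mqtt.local -t frigate/ptzcamera/ptz_autotracker/set -m "ON"
+mosquitto_sub -h mqtt.local -t frigate/ptzcamera/ptz_autotracker/state
+```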
diff --git a/frigate/app.py b/frigate/app.py
index 4d4aa5dd4..8d863bd14 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -10,6 +10,7 @@ from multiprocessing.synchronize import Event as MpEvent
 from types import FrameType
 from typing import Optional
 
+import faster_fifo as ff
 import psutil
 from faster_fifo import Queue
 from peewee_migrate import Router
@@ -25,6 +26,7 @@ from frigate.const import (
     CLIPS_DIR,
     CONFIG_DIR,
     DEFAULT_DB_PATH,
+    DEFAULT_QUEUE_BUFFER_SIZE,
     EXPORT_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
@@ -40,7 +42,8 @@ from frigate.object_detection import ObjectDetectProcess
 from frigate.object_processing import TrackedObjectProcessor
 from frigate.output import output_frames
 from frigate.plus import PlusApi
-from frigate.ptz import OnvifController
+from frigate.ptz.autotrack import PtzAutoTrackerThread
+from frigate.ptz.onvif import OnvifController
 from frigate.record.record import manage_recordings
 from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
@@ -56,11 +59,11 @@ logger = logging.getLogger(__name__)
 class FrigateApp:
     def __init__(self) -> None:
         self.stop_event: MpEvent = mp.Event()
-        self.detection_queue: Queue = mp.Queue()
+        self.detection_queue: Queue = ff.Queue()
         self.detectors: dict[str, ObjectDetectProcess] = {}
         self.detection_out_events: dict[str, MpEvent] = {}
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
-        self.log_queue: Queue = mp.Queue()
+        self.log_queue: Queue = ff.Queue()
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
         self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
@@ -132,6 +135,13 @@
                     "i",
                     self.config.cameras[camera_name].motion.improve_contrast,
                 ),
+                "ptz_autotracker_enabled": mp.Value(  # type: ignore[typeddict-item]
+                    # issue https://github.com/python/typeshed/issues/8799
+                    # from mypy 0.981 onwards
+                    "i",
+                    self.config.cameras[camera_name].onvif.autotracking.enabled,
+                ),
+                "ptz_stopped": mp.Event(),
                 "motion_threshold": mp.Value(  # type: ignore[typeddict-item]
                     # issue https://github.com/python/typeshed/issues/8799
                     # from mypy 0.981 onwards
@@ -160,6 +170,7 @@
                 "capture_process": None,
                 "process": None,
             }
+            self.camera_metrics[camera_name]["ptz_stopped"].set()
             self.feature_metrics[camera_name] = {
                 "audio_enabled": mp.Value(  # type: ignore[typeddict-item]
                     # issue https://github.com/python/typeshed/issues/8799
@@ -188,8 +199,8 @@
 
     def init_queues(self) -> None:
         # Queues for clip processing
-        self.event_queue: Queue = mp.Queue()
-        self.event_processed_queue: Queue = mp.Queue()
+        self.event_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
+        self.event_processed_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
         self.video_output_queue: Queue = mp.Queue(
             maxsize=len(self.config.cameras.keys()) * 2
         )
@@ -200,10 +211,10 @@
         )
 
         # Queue for recordings info
-        self.recordings_info_queue: Queue = mp.Queue()
+        self.recordings_info_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
 
         # Queue for timeline events
-        self.timeline_queue: Queue = mp.Queue()
+        self.timeline_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
 
     def init_database(self) -> None:
         def vacuum_db(db: SqliteExtDatabase) -> None:
@@ -306,7 +317,7 @@
         )
 
     def init_onvif(self) -> None:
-        self.onvif_controller = OnvifController(self.config)
+        self.onvif_controller = OnvifController(self.config, self.camera_metrics)
 
     def init_dispatcher(self) -> None:
         comms: list[Communicator] = []
@@ -360,6 +371,15 @@
             detector_config,
         )
 
+    def start_ptz_autotracker(self) -> None:
+        self.ptz_autotracker_thread = PtzAutoTrackerThread(
+            self.config,
+            self.onvif_controller,
+            self.camera_metrics,
+            self.stop_event,
+        )
+        self.ptz_autotracker_thread.start()
+
     def start_detected_frames_processor(self) -> None:
         self.detected_frames_processor = TrackedObjectProcessor(
             self.config,
@@ -369,6 +389,7 @@
             self.event_processed_queue,
             self.video_output_queue,
             self.recordings_info_queue,
+            self.ptz_autotracker_thread,
             self.stop_event,
         )
         self.detected_frames_processor.start()
@@ -533,6 +554,7 @@
             sys.exit(1)
         self.start_detectors()
         self.start_video_output_processor()
+        self.start_ptz_autotracker()
         self.start_detected_frames_processor()
         self.start_camera_processors()
         self.start_camera_capture_processes()
@@ -577,6 +599,7 @@
         self.dispatcher.stop()
 
         self.detected_frames_processor.join()
+        self.ptz_autotracker_thread.join()
         self.event_processor.join()
         self.event_cleanup.join()
         self.stats_emitter.join()
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 1c9105ce8..5025df2e6 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -5,9 +5,9 @@ from abc import ABC, abstractmethod
 from typing import Any, Callable
 
 from frigate.config import FrigateConfig
-from frigate.ptz import OnvifCommandEnum, OnvifController
+from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
 from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
-from frigate.util import restart_frigate
+from frigate.util.services import restart_frigate
 
 logger = logging.getLogger(__name__)
 
@@ -55,6 +55,7 @@ class Dispatcher:
             "audio": self._on_audio_command,
             "detect": self._on_detect_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
+            "ptz_autotracker": self._on_ptz_autotracker_command,
             "motion": self._on_motion_command,
             "motion_contour_area": self._on_motion_contour_area_command,
             "motion_threshold": self._on_motion_threshold_command,
@@ -159,6 +160,25 @@ class Dispatcher:
 
         self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)
 
+    def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
+        """Callback for ptz_autotracker topic."""
+        ptz_autotracker_settings = self.config.cameras[camera_name].onvif.autotracking
+
+        if payload == "ON":
+            if not self.camera_metrics[camera_name]["ptz_autotracker_enabled"].value:
+                logger.info(f"Turning on ptz autotracker for {camera_name}")
+                self.camera_metrics[camera_name]["ptz_autotracker_enabled"].value = True
+                ptz_autotracker_settings.enabled = True
+        elif payload == "OFF":
+            if self.camera_metrics[camera_name]["ptz_autotracker_enabled"].value:
+                logger.info(f"Turning off ptz autotracker for {camera_name}")
+                self.camera_metrics[camera_name][
+                    "ptz_autotracker_enabled"
+                ].value = False
+                ptz_autotracker_settings.enabled = False
+
+        self.publish(f"{camera_name}/ptz_autotracker/state", payload, retain=True)
+
     def _on_motion_contour_area_command(self, camera_name: str, payload: int) -> None:
         """Callback for motion contour topic."""
         try:
@@ -253,7 +273,7 @@ class Dispatcher:
         try:
             if "preset" in payload.lower():
                 command = OnvifCommandEnum.preset
-                param = payload.lower().split("-")[1]
+                param = payload.lower()[payload.index("_") + 1 :]
             else:
                 command = OnvifCommandEnum[payload.lower()]
                 param = ""
diff --git a/frigate/comms/mqtt.py b/frigate/comms/mqtt.py
index 2859a04a2..76c4f28af 100644
--- a/frigate/comms/mqtt.py
+++ b/frigate/comms/mqtt.py
@@ -69,6 +69,11 @@ class MqttClient(Communicator):  # type: ignore[misc]
                 "ON" if camera.motion.improve_contrast else "OFF",  # type: ignore[union-attr]
                 retain=True,
             )
+            self.publish(
+                f"{camera_name}/ptz_autotracker/state",
+                "ON" if camera.onvif.autotracking.enabled else "OFF",
+                retain=True,
+            )
             self.publish(
                 f"{camera_name}/motion_threshold/state",
                 camera.motion.threshold,  # type: ignore[union-attr]
@@ -152,6 +157,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "audio",
             "motion",
             "improve_contrast",
+            "ptz_autotracker",
             "motion_threshold",
             "motion_contour_area",
         ]
diff --git a/frigate/config.py b/frigate/config.py
index 536dae151..2e3ade2fb 100644
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -22,13 +22,13 @@ from frigate.ffmpeg_presets import (
     parse_preset_output_rtmp,
 )
 from frigate.plus import PlusApi
-from frigate.util import (
-    create_mask,
+from frigate.util.builtin import (
     deep_merge,
     escape_special_characters,
     get_ffmpeg_arg_list,
     load_config_with_no_duplicates,
 )
+from frigate.util.image import create_mask
 
 logger = logging.getLogger(__name__)
 
@@ -138,11 +138,31 @@ class MqttConfig(FrigateBaseModel):
         return v
 
 
+class PtzAutotrackConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
+    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    required_zones: List[str] = Field(
+        default_factory=list,
+        title="List of required zones to be entered in order to begin autotracking.",
+    )
+    return_preset: str = Field(
+        default="home",
+        title="Name of camera preset to return to when object tracking is over.",
+    )
+    timeout: int = Field(
+        default=10, title="Seconds to delay before returning to preset."
+    )
+
+
 class OnvifConfig(FrigateBaseModel):
     host: str = Field(default="", title="Onvif Host")
     port: int = Field(default=8000, title="Onvif Port")
     user: Optional[str] = Field(title="Onvif Username")
     password: Optional[str] = Field(title="Onvif Password")
+    autotracking: PtzAutotrackConfig = Field(
+        default_factory=PtzAutotrackConfig,
+        title="PTZ auto tracking config.",
+    )
 
 
 class RetainModeEnum(str, Enum):
@@ -403,6 +423,9 @@ class AudioConfig(FrigateBaseModel):
     max_not_heard: int = Field(
         default=30, title="Seconds of not hearing the type of audio to end the event."
     )
+    min_volume: int = Field(
+        default=500, title="Min volume required to run audio detection."
+    )
     listen: List[str] = Field(
         default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
     )
@@ -902,6 +925,17 @@ def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
         )
 
 
+def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
+    """Verify that required_zones are specified when autotracking is enabled."""
+    if (
+        camera_config.onvif.autotracking.enabled
+        and not camera_config.onvif.autotracking.required_zones
+    ):
+        raise ValueError(
+            f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
+        )
+
+
 class FrigateConfig(FrigateBaseModel):
     mqtt: MqttConfig = Field(title="MQTT Configuration.")
     database: DatabaseConfig = Field(
@@ -1077,6 +1111,7 @@ class FrigateConfig(FrigateBaseModel):
             verify_recording_retention(camera_config)
             verify_recording_segments_setup_with_reasonable_time(camera_config)
             verify_zone_objects_are_tracked(camera_config)
+            verify_autotrack_zones(camera_config)
 
             if camera_config.rtmp.enabled:
                 logger.warning(
diff --git a/frigate/const.py b/frigate/const.py
index 20e2b0daa..c508a83bf 100644
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -11,7 +11,6 @@ YAML_EXT = (".yaml", ".yml")
 FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
-BTBN_PATH = "/usr/lib/btbn-ffmpeg"
 
 # Attributes
 
@@ -47,3 +46,7 @@ DRIVER_INTEL_iHD = "iHD"
 
 MAX_SEGMENT_DURATION = 600
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times
+
+# Queue Values
+
+DEFAULT_QUEUE_BUFFER_SIZE = 2000 * 1000  # 2MB
diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py
index f65826a57..ca1915449 100644
--- a/frigate/detectors/detector_config.py
+++ b/frigate/detectors/detector_config.py
@@ -11,7 +11,7 @@ from pydantic import BaseModel, Extra, Field
 from pydantic.fields import PrivateAttr
 
 from frigate.plus import PlusApi
-from frigate.util import load_labels
+from frigate.util.builtin import load_labels
 
 logger = logging.getLogger(__name__)
 
diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py
index ca03d483b..ac67626a2 100644
--- a/frigate/detectors/plugins/edgetpu_tfl.py
+++ b/frigate/detectors/plugins/edgetpu_tfl.py
@@ -27,14 +27,17 @@ class EdgeTpuTfl(DetectionApi):
     type_key = DETECTOR_KEY
 
     def __init__(self, detector_config: EdgeTpuDetectorConfig):
-        device_config = {"device": "usb"}
+        device_config = {}
         if detector_config.device is not None:
             device_config = {"device": detector_config.device}
 
         edge_tpu_delegate = None
 
         try:
-            logger.info(f"Attempting to load TPU as {device_config['device']}")
+            device_type = (
+                device_config["device"] if "device" in device_config else "auto"
+            )
+            logger.info(f"Attempting to load TPU as {device_type}")
             edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
             logger.info("TPU found")
             self.interpreter = Interpreter(
diff --git a/frigate/detectors/plugins/tensorrt.py b/frigate/detectors/plugins/tensorrt.py
index 7251b8751..dea3fe078 100644
--- a/frigate/detectors/plugins/tensorrt.py
+++ b/frigate/detectors/plugins/tensorrt.py
@@ -78,7 +78,7 @@ class TensorRtDetector(DetectionApi):
         try:
             trt.init_libnvinfer_plugins(self.trt_logger, "")
 
-            ctypes.cdll.LoadLibrary("/trt-models/libyolo_layer.so")
+            ctypes.cdll.LoadLibrary("/usr/local/lib/libyolo_layer.so")
         except OSError as e:
             logger.error(
                 "ERROR: failed to load libraries. %s",
diff --git a/frigate/events/audio.py b/frigate/events/audio.py
index 488c94fcc..decd17ca4 100644
--- a/frigate/events/audio.py
+++ b/frigate/events/audio.py
@@ -26,7 +26,8 @@ from frigate.ffmpeg_presets import parse_preset_input
 from frigate.log import LogPipe
 from frigate.object_detection import load_labels
 from frigate.types import FeatureMetricsTypes
-from frigate.util import get_ffmpeg_arg_list, listen
+from frigate.util.builtin import get_ffmpeg_arg_list
+from frigate.util.services import listen
 from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
 
 try:
@@ -168,14 +169,18 @@ class AudioEventMaintainer(threading.Thread):
         if not self.feature_metrics[self.config.name]["audio_enabled"].value:
             return
 
-        waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
-        model_detections = self.detector.detect(waveform)
+        rms = np.sqrt(np.mean(np.absolute(np.square(audio.astype(np.float32)))))
 
-        for label, score, _ in model_detections:
-            if label not in self.config.audio.listen:
-                continue
+        # only run audio detection when volume is above min_volume
+        if rms >= self.config.audio.min_volume:
+            waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
+            model_detections = self.detector.detect(waveform)
 
-            self.handle_detection(label, score)
+            for label, score, _ in model_detections:
+                if label not in self.config.audio.listen:
+                    continue
+
+                self.handle_detection(label, score)
 
         self.expire_detections()
 
@@ -191,7 +196,7 @@
             )
 
             if resp.status_code == 200:
-                event_id = resp.json()[0]["event_id"]
+                event_id = resp.json()["event_id"]
                 self.detections[label] = {
                     "id": event_id,
                     "label": label,
diff --git a/frigate/events/external.py b/frigate/events/external.py
index 20456b9cb..a801e6d24 100644
--- a/frigate/events/external.py
+++ b/frigate/events/external.py
@@ -14,7 +14,7 @@ from faster_fifo import Queue
 from frigate.config import CameraConfig, FrigateConfig
 from frigate.const import CLIPS_DIR
 from frigate.events.maintainer import EventTypeEnum
-from frigate.util import draw_box_with_label
+from frigate.util.image import draw_box_with_label
 
 logger = logging.getLogger(__name__)
 
@@ -57,8 +57,12 @@ class ExternalEventProcessor:
             "label": label,
             "sub_label": sub_label,
             "camera": camera,
-            "start_time": now,
-            "end_time": now + duration if duration is not None else None,
+            "start_time": now - camera_config.record.events.pre_capture,
+            "end_time": now
+            + duration
+            + camera_config.record.events.post_capture
+            if duration is not None
+            else None,
             "thumbnail": thumbnail,
             "has_clip": camera_config.record.enabled and include_recording,
             "has_snapshot": True,
diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py
index 34cb01261..d92bb0a44 100644
--- a/frigate/events/maintainer.py
+++ b/frigate/events/maintainer.py
@@ -11,7 +11,7 @@ from faster_fifo import Queue
 from frigate.config import EventsConfig, FrigateConfig
 from frigate.models import Event
 from frigate.types import CameraMetricsTypes
-from frigate.util import to_relative_box
+from frigate.util.builtin import to_relative_box
 
 logger = logging.getLogger(__name__)
 
@@ -199,7 +199,8 @@ class EventProcessor(threading.Thread):
 
         # only overwrite the sub_label in the database if it's set
         if event_data.get("sub_label") is not None:
-            event[Event.sub_label] = event_data["sub_label"]
+            event[Event.sub_label] = event_data["sub_label"][0]
+            event[Event.data]["sub_label_score"] = event_data["sub_label"][1]
 
         (
             Event.insert(event)
diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py
index a2785813c..43d2504bd 100644
--- a/frigate/ffmpeg_presets.py
+++ b/frigate/ffmpeg_presets.py
@@ -5,8 +5,7 @@ import os
 from enum import Enum
 from typing import Any
 
-from frigate.const import BTBN_PATH
-from frigate.util import vainfo_hwaccel
+from frigate.util.services import vainfo_hwaccel
 from frigate.version import VERSION
 
 logger = logging.getLogger(__name__)
@@ -43,7 +42,11 @@ class LibvaGpuSelector:
         return ""
 
 
-TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"
+TIMEOUT_PARAM = (
+    "-timeout"
+    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
+    else "-stimeout"
+)
 
 _gpu_selector = LibvaGpuSelector()
 _user_agent_args = [
@@ -107,14 +110,14 @@ PRESETS_HW_ACCEL_DECODE = {
 }
 
 PRESETS_HW_ACCEL_SCALE = {
-    "preset-rpi-32-h264": "-r {0} -s {1}x{2}",
-    "preset-rpi-64-h264": "-r {0} -s {1}x{2}",
+    "preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}",
+    "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
     "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
     "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
     "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
     "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
     "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
-    "default": "-r {0} -s {1}x{2}",
+    "default": "-r {0} -vf fps={0},scale={1}:{2}",
 }
 
 PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
diff --git a/frigate/http.py b/frigate/http.py
index 57b2103e7..95fd25502 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -24,27 +24,27 @@ from flask import (
     make_response,
     request,
 )
-from peewee import DoesNotExist, SqliteDatabase, fn, operator
+from peewee import DoesNotExist, fn, operator
 from playhouse.shortcuts import model_to_dict
+from playhouse.sqliteq import SqliteQueueDatabase
 from tzlocal import get_localzone_name
 
 from frigate.config import FrigateConfig
-from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
+from frigate.const import CLIPS_DIR, CONFIG_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
 from frigate.events.external import ExternalEventProcessor
 from frigate.models import Event, Recordings, Timeline
 from frigate.object_processing import TrackedObject
 from frigate.plus import PlusApi
-from frigate.ptz import OnvifController
+from frigate.ptz.onvif import OnvifController
 from frigate.record.export import PlaybackFactorEnum, RecordingExporter
 from frigate.stats import stats_snapshot
 from frigate.storage import StorageMaintainer
-from frigate.util import (
+from frigate.util.builtin import (
     clean_camera_user_pass,
-    ffprobe_stream,
     get_tz_modifiers,
-    restart_frigate,
-    vainfo_hwaccel,
+    update_yaml_from_url,
 )
+from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel
 from frigate.version import VERSION
 
 logger = logging.getLogger(__name__)
@@ -54,7 +54,7 @@ bp = Blueprint("frigate", __name__)
 
 def create_app(
     frigate_config,
-    database: SqliteDatabase,
+    database: SqliteQueueDatabase,
     stats_tracking,
     detected_frames_processor,
     storage_maintainer: StorageMaintainer,
@@ -420,8 +420,8 @@ def get_labels():
         else:
             events = Event.select(Event.label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get labels: {e}"}), 404
         )
 
     labels = sorted([e.label for e in events])
@@ -435,8 +435,9 @@ def get_sub_labels():
     try:
         events = Event.select(Event.sub_label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get sub_labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get sub_labels: {e}"}),
+            404,
        )
 
     sub_labels = [e.sub_label for e in events]
@@ -869,12 +870,17 @@ def events():
@bp.route("/events//