Merge branch 'dev' into advancedoptionsui
Commit 3ed1ab8ab1
@ -14,6 +14,11 @@ curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
sudo mkdir -p /media/frigate
sudo chown -R "$(id -u):$(id -g)" /media/frigate

# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
# s6 service file. For dev, where frigate is started from an interactive
# shell, we define it in .bashrc instead.
echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc

make version

cd web
Dockerfile (22 lines changed)
@ -262,15 +262,35 @@ FROM deps AS frigate
WORKDIR /opt/frigate/
COPY --from=rootfs / /

# Build TensorRT-specific library
FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps

RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
    /tensorrt_libyolo.sh

# Frigate w/ TensorRT Support as separate image
FROM frigate AS frigate-tensorrt

# Disable S6 Global timeout
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

ENV TRT_VER=8.5.3
ENV YOLO_MODELS="yolov7-tiny-416"

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/support/tensorrt_detector/rootfs/ /

RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    pip3 install -U /deps/trt-wheels/*.whl && \
    ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
    ldconfig

# Dev Container w/ TRT
FROM devcontainer AS devcontainer-trt

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/support/tensorrt_detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    pip3 install -U /deps/trt-wheels/*.whl
@ -2,10 +2,10 @@

set -euxo pipefail

NGINX_VERSION="1.22.1"
VOD_MODULE_VERSION="1.30"
SECURE_TOKEN_MODULE_VERSION="1.4"
RTMP_MODULE_VERSION="1.2.1"
NGINX_VERSION="1.25.1"
VOD_MODULE_VERSION="1.31"
SECURE_TOKEN_MODULE_VERSION="1.5"
RTMP_MODULE_VERSION="1.2.2"

cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list

@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
    libva-drm2 mesa-va-drivers
fi

apt-get purge gnupg apt-transport-https wget xz-utils -y
apt-get purge gnupg apt-transport-https xz-utils -y
apt-get clean autoclean -y
apt-get autoremove --purge -y
rm -rf /var/lib/apt/lists/*
@ -44,6 +44,7 @@ function migrate_db_path() {

echo "[INFO] Preparing Frigate..."
migrate_db_path
export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')

echo "[INFO] Starting Frigate..."

@ -43,6 +43,8 @@ function get_ip_and_port_from_supervisor() {
    export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
}

export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')

if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
    echo "[INFO] Preparing go2rtc config..."
@ -7,7 +7,7 @@ import sys
import yaml

sys.path.insert(0, "/opt/frigate")
from frigate.const import BIRDSEYE_PIPE, BTBN_PATH  # noqa: E402
from frigate.const import BIRDSEYE_PIPE  # noqa: E402
from frigate.ffmpeg_presets import (  # noqa: E402
    parse_preset_hardware_acceleration_encode,
)
@ -71,7 +71,7 @@ elif go2rtc_config["rtsp"].get("default_query") is None:
    go2rtc_config["rtsp"]["default_query"] = "mp4"

# need to replace ffmpeg command when using ffmpeg4
if not os.path.exists(BTBN_PATH):
if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
    if go2rtc_config.get("ffmpeg") is None:
        go2rtc_config["ffmpeg"] = {
            "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
@ -1,3 +1,4 @@
/usr/local/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
@ -0,0 +1,53 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Generate models for the TensorRT detector

set -o errexit -o nounset -o pipefail

MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"

# Create output folder
mkdir -p ${OUTPUT_FOLDER}

FIRST_MODEL=true
MODEL_CONVERT=""

for model in ${YOLO_MODELS//,/ }
do
    # Remove old link in case path/version changed
    rm -f ${MODEL_CACHE_DIR}/${model}.trt

    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
        if [[ ${FIRST_MODEL} = true ]]; then
            MODEL_CONVERT="${model}"
            FIRST_MODEL=false;
        else
            MODEL_CONVERT+=",${model}";
        fi
    else
        ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
    fi
done

if [[ -z ${MODEL_CONVERT} ]]; then
    echo "No models to convert."
    exit 0
fi

echo "Generating the following TRT Models: ${MODEL_CONVERT}"

# Build trt engine
cd /usr/local/src/tensorrt_demos/yolo

# Download yolo weights
./download_yolo.sh $MODEL_CONVERT > /dev/null

for model in ${MODEL_CONVERT//,/ }
do
    echo "Converting ${model} model"
    python3 yolo_to_onnx.py -m ${model} > /dev/null
    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
done
@ -0,0 +1 @@
oneshot
@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
docker/support/tensorrt_detector/tensorrt_libyolo.sh (18 lines, executable file)
@ -0,0 +1,18 @@
#!/bin/bash

set -euxo pipefail

SCRIPT_DIR="/usr/local/src/tensorrt_demos"

# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download

# Build libyolo
cd ./tensorrt_demos/plugins && make all
cp libyolo_layer.so /usr/local/lib/libyolo_layer.so

# Store yolo scripts for later conversion
cd ../
mkdir -p ${SCRIPT_DIR}/plugins
cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so
cp -a yolo ${SCRIPT_DIR}/
@ -1,34 +0,0 @@
#!/bin/bash

set -euxo pipefail

CUDA_HOME=/usr/local/cuda
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
OUTPUT_FOLDER=/tensorrt_models
echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"

# Create output folder
mkdir -p ${OUTPUT_FOLDER}

# Install packages
pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3

# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos

# Build libyolo
cd /tensorrt_demos/plugins && make all
cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so

# Download yolo weights
cd /tensorrt_demos/yolo && ./download_yolo.sh

# Build trt engine
cd /tensorrt_demos/yolo

for model in ${YOLO_MODELS//,/ }
do
    python3 yolo_to_onnx.py -m ${model}
    python3 onnx_to_tensorrt.py -m ${model}
    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
done
docs/docs/configuration/autotracking.md (71 lines, new file)
@ -0,0 +1,71 @@
---
id: autotracking
title: Autotracking
---

An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame.

## Autotracking behavior

Once Frigate determines that an object is not a false positive and has entered one of the required zones, the autotracker will move the PTZ camera to keep the object centered in the frame until the object either moves out of the frame, the PTZ is not capable of any more movement, or Frigate loses track of it.

Upon loss of tracking, Frigate will scan the region of the lost object for `timeout` seconds. If an object of the same type is found in that region, Frigate will track that new object.

When tracking has ended, Frigate will return to the camera preset specified by the `return_preset` configuration entry.

## Checking ONVIF camera support

Frigate autotracking functions with PTZ cameras capable of relative movement within the field of view (as specified in the [ONVIF spec](https://www.onvif.org/specs/srv/ptz/ONVIF-PTZ-Service-Spec-v1712.pdf) as `RelativePanTiltTranslationSpace` having a `TranslationSpaceFov` entry).

Many cheaper PTZs likely don't support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported.

Alternatively, you can download and run [this simple Python script](https://gist.github.com/hawkeye217/152a1d4ba80760dac95d46e143d37112), replacing the details on line 4 with your camera's IP address, ONVIF port, username, and password to check your camera.
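For reference, the same capability check can be sketched with the `onvif-zeep` package; this is an illustrative outline (host, port, and credentials are placeholders), not the linked script itself:

```python
from onvif import ONVIFCamera

cam = ONVIFCamera("192.168.1.10", 8000, "admin", "password")  # placeholders
media = cam.create_media_service()
ptz = cam.create_ptz_service()

# Query the PTZ configuration options for the first media profile
profile = media.GetProfiles()[0]
request = ptz.create_type("GetConfigurationOptions")
request.ConfigurationToken = profile.PTZConfiguration.token
opts = ptz.GetConfigurationOptions(request)

# Autotracking needs a RelativePanTiltTranslationSpace with a TranslationSpaceFov URI
spaces = opts.Spaces.RelativePanTiltTranslationSpace or []
print("FOV-relative movement supported:", any("TranslationSpaceFov" in s.URI for s in spaces))
```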
## Configuration

First, configure the ONVIF parameters for your camera, then specify the object types to track, a required zone the object must enter, and a camera preset name to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.

An [ONVIF connection](cameras.md) is required for autotracking to function.

Note that `autotracking` is disabled by default but can be enabled in the configuration or via MQTT.

```yaml
cameras:
  ptzcamera:
    ...
    onvif:
      # Required: host of the camera being connected to.
      host: 0.0.0.0
      # Optional: ONVIF port for device (default: shown below).
      port: 8000
      # Optional: username for login.
      # NOTE: Some devices require admin to access ONVIF.
      user: admin
      # Optional: password for login.
      password: admin
      # Optional: PTZ camera object autotracking. Keeps a moving object in
      # the center of the frame by automatically moving the PTZ camera.
      autotracking:
        # Optional: enable/disable object autotracking. (default: shown below)
        enabled: False
        # Optional: list of objects to track from labelmap.txt (default: shown below)
        track:
          - person
        # Required: Begin automatically tracking an object when it enters any of the listed zones.
        required_zones:
          - zone_name
        # Required: Name of ONVIF camera preset to return to when tracking is over. (default: shown below)
        return_preset: home
        # Optional: Seconds to delay before returning to preset. (default: shown below)
        timeout: 10
```

## Best practices and considerations

Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR.

The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases (especially for faster moving objects), the default 5 fps is insufficient for the motion estimator to perform accurately. 10 fps is the current recommendation. Higher frame rates will likely not be more performant and will only slow down Frigate and the motion estimator. Adjust your camera to output at least 10 frames per second and change the `fps` parameter in the [detect configuration](index.md) of your configuration file.

A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. If Frigate already has trouble keeping track of your object, the autotracker will struggle as well.

The autotracker adds PTZ motion requests to a queue while the motor is moving. Once the motor stops, the queued requests are executed together as one large move rather than as incremental moves. If your PTZ's motor is slow, you may not be able to reliably autotrack fast-moving objects.
@ -66,3 +66,5 @@ cameras:
```

then PTZ controls will be available in the camera's WebUI.

An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs.
@ -145,6 +145,12 @@ audio:
  enabled: False
  # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
  max_not_heard: 30
  # Optional: Configure the min rms volume required to run audio detection (default: shown below)
  # As a rule of thumb:
  #  - 200 - high sensitivity
  #  - 500 - medium sensitivity
  #  - 1000 - low sensitivity
  min_volume: 500
  # Optional: Types of audio to listen for (default: shown below)
  listen:
    - bark
@ -555,6 +561,21 @@ cameras:
    user: admin
    # Optional: password for login.
    password: admin
    # Optional: PTZ camera object autotracking. Keeps a moving object in
    # the center of the frame by automatically moving the PTZ camera.
    autotracking:
      # Optional: enable/disable object autotracking. (default: shown below)
      enabled: False
      # Optional: list of objects to track from labelmap.txt (default: shown below)
      track:
        - person
      # Required: Begin automatically tracking an object when it enters any of the listed zones.
      required_zones:
        - zone_name
      # Required: Name of ONVIF camera preset to return to when tracking is over.
      return_preset: preset_name
      # Optional: Seconds to delay before returning to preset. (default: shown below)
      timeout: 10

# Optional: Configuration for how to sort the cameras in the Birdseye view.
birdseye:
@ -174,9 +174,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t

### Minimum Hardware Support

The TensorRT detector uses the 11.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below.

> **TODO:** NVidia claims support on compute 3.5 and 3.7, but marks it as deprecated. This would have some, but not all, Kepler GPUs as possibly working. This needs testing before making any claims of support.

The TensorRT detector uses the 12.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below.

To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
@ -192,22 +190,15 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben

### Generate Models

The model used for TensorRT must be preprocessed on the same hardware platform that it will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is provided that will build several common models.
The model used for TensorRT must be preprocessed on the same hardware platform that it will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.

To generate model files, create a new folder to save the models, download the script, and launch a docker container that will run the script.
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.

```bash
mkdir trt-models
wget https://github.com/blakeblackshear/frigate/raw/master/docker/tensorrt_models.sh
chmod +x tensorrt_models.sh
docker run --gpus=all --rm -it -v `pwd`/trt-models:/tensorrt_models -v `pwd`/tensorrt_models.sh:/tensorrt_models.sh nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh
```

By default, the `yolov7-tiny-416` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To disable model generation entirely, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.

The `trt-models` folder can then be mapped into your Frigate container as `trt-models` and the models referenced from the config.
If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.

If your GPU does not support FP16 operations, you can pass the environment variable `-e USE_FP16=False` to the `docker run` command to disable it.

Specific models can be selected by passing an environment variable to the `docker run` command. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.

```
yolov3-288
@ -237,11 +228,20 @@ yolov7x-640
yolov7x-320
```

An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this:

```yml
frigate:
  environment:
    - YOLO_MODELS="yolov4-608,yolov7x-640"
    - USE_FP16=false
```

### Configuration Parameters

The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.

The TensorRT detector uses `.trt` model files that are located in `/trt-models/` by default. The model path and dimensions used will depend on which model you have generated.
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.

```yaml
detectors:
@ -250,7 +250,7 @@ detectors:
    device: 0 # This is the default, select the first GPU

model:
  path: /trt-models/yolov7-tiny-416.trt
  path: /config/model_cache/tensorrt/yolov7-tiny-416.trt
  input_tensor: nchw
  input_pixel_format: rgb
  width: 416
@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known

### TensorRT

The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).

Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
@ -63,7 +63,9 @@ Message published for each changed event. The first message is published when th
    "stationary": false, // whether or not the object is considered stationary
    "motionless_count": 0, // number of frames the object has been motionless
    "position_changes": 2, // number of times the object has moved from a stationary position
    "attributes": [], // set of unique attributes that have been identified on the object
    "attributes": {
      "face": 0.64
    }, // attributes with top score that have been identified on the object at any point
    "current_attributes": [] // detailed data about the current attributes in this frame
  },
  "after": {
@ -90,13 +92,15 @@ Message published for each changed event. The first message is published when th
    "stationary": false, // whether or not the object is considered stationary
    "motionless_count": 0, // number of frames the object has been motionless
    "position_changes": 2, // number of times the object has changed position
    "attributes": ["face"], // set of unique attributes that have been identified on the object
    "attributes": {
      "face": 0.86
    }, // attributes with top score that have been identified on the object at any point
    "current_attributes": [
      // detailed data about the current attributes in this frame
      {
        "label": "face",
        "box": [442, 506, 534, 524],
        "score": 0.64
        "score": 0.86
      }
    ]
  }
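With this change, `attributes` becomes a label-to-score map instead of a plain list. A minimal consumer sketch, assuming paho-mqtt (1.x API), the default `frigate` topic prefix, and a placeholder broker address:

```python
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, message):
    payload = json.loads(message.payload)
    attributes = payload["after"]["attributes"]  # e.g. {"face": 0.86}
    if attributes:
        # pick the attribute with the highest score seen so far
        top = max(attributes, key=attributes.get)
        print(f"top attribute: {top} ({attributes[top]:.2f})")


client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt.local", 1883)  # broker address is a placeholder
client.subscribe("frigate/events")
client.loop_forever()
```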
@ -184,7 +188,15 @@ Topic to send PTZ commands to camera.

| Command                | Description                                                                                |
| ---------------------- | ------------------------------------------------------------------------------------------ |
| `preset-<preset_name>` | send command to move to preset with name `<preset_name>`                                   |
| `preset_<preset_name>` | send command to move to preset with name `<preset_name>`                                   |
| `MOVE_<dir>`           | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT]  |
| `ZOOM_<dir>`           | send command to continuously zoom `<dir>`, possible values are [IN, OUT]                   |
| `STOP`                 | send command to stop moving                                                                |

### `frigate/<camera_name>/ptz_autotracker/set`

Topic to turn the PTZ autotracker for a camera on and off. Expected values are `ON` and `OFF`.
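Toggling it from a script is a single publish; a sketch assuming paho-mqtt, a camera named `ptzcamera`, and a placeholder broker address:

```python
import paho.mqtt.publish as publish

# Enable the autotracker for the camera named "ptzcamera"
publish.single("frigate/ptzcamera/ptz_autotracker/set", "ON", hostname="mqtt.local")
```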

### `frigate/<camera_name>/ptz_autotracker/state`

Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.
@ -10,6 +10,7 @@ from multiprocessing.synchronize import Event as MpEvent
from types import FrameType
from typing import Optional

import faster_fifo as ff
import psutil
from faster_fifo import Queue
from peewee_migrate import Router
@ -25,6 +26,7 @@ from frigate.const import (
    CLIPS_DIR,
    CONFIG_DIR,
    DEFAULT_DB_PATH,
    DEFAULT_QUEUE_BUFFER_SIZE,
    EXPORT_DIR,
    MODEL_CACHE_DIR,
    RECORD_DIR,
@ -40,7 +42,8 @@ from frigate.object_detection import ObjectDetectProcess
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.plus import PlusApi
from frigate.ptz import OnvifController
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.ptz.onvif import OnvifController
from frigate.record.record import manage_recordings
from frigate.stats import StatsEmitter, stats_init
from frigate.storage import StorageMaintainer
@ -56,11 +59,11 @@ logger = logging.getLogger(__name__)
class FrigateApp:
    def __init__(self) -> None:
        self.stop_event: MpEvent = mp.Event()
        self.detection_queue: Queue = mp.Queue()
        self.detection_queue: Queue = ff.Queue()
        self.detectors: dict[str, ObjectDetectProcess] = {}
        self.detection_out_events: dict[str, MpEvent] = {}
        self.detection_shms: list[mp.shared_memory.SharedMemory] = []
        self.log_queue: Queue = mp.Queue()
        self.log_queue: Queue = ff.Queue()
        self.plus_api = PlusApi()
        self.camera_metrics: dict[str, CameraMetricsTypes] = {}
        self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
@ -132,6 +135,13 @@ class FrigateApp:
                "i",
                self.config.cameras[camera_name].motion.improve_contrast,
            ),
            "ptz_autotracker_enabled": mp.Value(  # type: ignore[typeddict-item]
                # issue https://github.com/python/typeshed/issues/8799
                # from mypy 0.981 onwards
                "i",
                self.config.cameras[camera_name].onvif.autotracking.enabled,
            ),
            "ptz_stopped": mp.Event(),
            "motion_threshold": mp.Value(  # type: ignore[typeddict-item]
                # issue https://github.com/python/typeshed/issues/8799
                # from mypy 0.981 onwards
@ -160,6 +170,7 @@ class FrigateApp:
            "capture_process": None,
            "process": None,
        }
        self.camera_metrics[camera_name]["ptz_stopped"].set()
        self.feature_metrics[camera_name] = {
            "audio_enabled": mp.Value(  # type: ignore[typeddict-item]
                # issue https://github.com/python/typeshed/issues/8799
@ -188,8 +199,8 @@ class FrigateApp:

    def init_queues(self) -> None:
        # Queues for clip processing
        self.event_queue: Queue = mp.Queue()
        self.event_processed_queue: Queue = mp.Queue()
        self.event_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
        self.event_processed_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
        self.video_output_queue: Queue = mp.Queue(
            maxsize=len(self.config.cameras.keys()) * 2
        )
@ -200,10 +211,10 @@
        )

        # Queue for recordings info
        self.recordings_info_queue: Queue = mp.Queue()
        self.recordings_info_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)

        # Queue for timeline events
        self.timeline_queue: Queue = mp.Queue()
        self.timeline_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)

    def init_database(self) -> None:
        def vacuum_db(db: SqliteExtDatabase) -> None:
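The `faster_fifo.Queue` swapped in here keeps the blocking put/get interface of `multiprocessing.Queue` but is bounded by a shared-memory buffer size in bytes. A standalone sketch of the semantics (not Frigate code; the 2 MB figure mirrors `DEFAULT_QUEUE_BUFFER_SIZE`):

```python
from faster_fifo import Queue

# Bounded by buffer size in bytes rather than item count (2MB here)
q = Queue(2000 * 1000)

q.put({"camera": "front", "frame_time": 1234.5})
print(q.get())  # same blocking put/get interface as multiprocessing.Queue
```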
@ -306,7 +317,7 @@ class FrigateApp:
        )

    def init_onvif(self) -> None:
        self.onvif_controller = OnvifController(self.config)
        self.onvif_controller = OnvifController(self.config, self.camera_metrics)

    def init_dispatcher(self) -> None:
        comms: list[Communicator] = []
@ -360,6 +371,15 @@ class FrigateApp:
            detector_config,
        )

    def start_ptz_autotracker(self) -> None:
        self.ptz_autotracker_thread = PtzAutoTrackerThread(
            self.config,
            self.onvif_controller,
            self.camera_metrics,
            self.stop_event,
        )
        self.ptz_autotracker_thread.start()

    def start_detected_frames_processor(self) -> None:
        self.detected_frames_processor = TrackedObjectProcessor(
            self.config,
@ -369,6 +389,7 @@ class FrigateApp:
            self.event_processed_queue,
            self.video_output_queue,
            self.recordings_info_queue,
            self.ptz_autotracker_thread,
            self.stop_event,
        )
        self.detected_frames_processor.start()
@ -533,6 +554,7 @@ class FrigateApp:
            sys.exit(1)
        self.start_detectors()
        self.start_video_output_processor()
        self.start_ptz_autotracker()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.start_camera_capture_processes()
@ -577,6 +599,7 @@ class FrigateApp:

        self.dispatcher.stop()
        self.detected_frames_processor.join()
        self.ptz_autotracker_thread.join()
        self.event_processor.join()
        self.event_cleanup.join()
        self.stats_emitter.join()
@ -5,9 +5,9 @@ from abc import ABC, abstractmethod
from typing import Any, Callable

from frigate.config import FrigateConfig
from frigate.ptz import OnvifCommandEnum, OnvifController
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
from frigate.util import restart_frigate
from frigate.util.services import restart_frigate

logger = logging.getLogger(__name__)

@ -55,6 +55,7 @@ class Dispatcher:
            "audio": self._on_audio_command,
            "detect": self._on_detect_command,
            "improve_contrast": self._on_motion_improve_contrast_command,
            "ptz_autotracker": self._on_ptz_autotracker_command,
            "motion": self._on_motion_command,
            "motion_contour_area": self._on_motion_contour_area_command,
            "motion_threshold": self._on_motion_threshold_command,
@ -159,6 +160,25 @@ class Dispatcher:

        self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)

    def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
        """Callback for ptz_autotracker topic."""
        ptz_autotracker_settings = self.config.cameras[camera_name].onvif.autotracking

        if payload == "ON":
            if not self.camera_metrics[camera_name]["ptz_autotracker_enabled"].value:
                logger.info(f"Turning on ptz autotracker for {camera_name}")
                self.camera_metrics[camera_name]["ptz_autotracker_enabled"].value = True
                ptz_autotracker_settings.enabled = True
        elif payload == "OFF":
            if self.camera_metrics[camera_name]["ptz_autotracker_enabled"].value:
                logger.info(f"Turning off ptz autotracker for {camera_name}")
                self.camera_metrics[camera_name][
                    "ptz_autotracker_enabled"
                ].value = False
                ptz_autotracker_settings.enabled = False

        self.publish(f"{camera_name}/ptz_autotracker/state", payload, retain=True)

    def _on_motion_contour_area_command(self, camera_name: str, payload: int) -> None:
        """Callback for motion contour topic."""
        try:
@ -253,7 +273,7 @@ class Dispatcher:
        try:
            if "preset" in payload.lower():
                command = OnvifCommandEnum.preset
                param = payload.lower().split("-")[1]
                param = payload.lower()[payload.index("_") + 1 :]
            else:
                command = OnvifCommandEnum[payload.lower()]
                param = ""

@ -69,6 +69,11 @@ class MqttClient(Communicator): # type: ignore[misc]
            "ON" if camera.motion.improve_contrast else "OFF",  # type: ignore[union-attr]
            retain=True,
        )
        self.publish(
            f"{camera_name}/ptz_autotracker/state",
            "ON" if camera.onvif.autotracking.enabled else "OFF",
            retain=True,
        )
        self.publish(
            f"{camera_name}/motion_threshold/state",
            camera.motion.threshold,  # type: ignore[union-attr]
@ -152,6 +157,7 @@ class MqttClient(Communicator): # type: ignore[misc]
            "audio",
            "motion",
            "improve_contrast",
            "ptz_autotracker",
            "motion_threshold",
            "motion_contour_area",
        ]
@ -22,13 +22,13 @@ from frigate.ffmpeg_presets import (
    parse_preset_output_rtmp,
)
from frigate.plus import PlusApi
from frigate.util import (
    create_mask,
from frigate.util.builtin import (
    deep_merge,
    escape_special_characters,
    get_ffmpeg_arg_list,
    load_config_with_no_duplicates,
)
from frigate.util.image import create_mask

logger = logging.getLogger(__name__)
@ -138,11 +138,31 @@ class MqttConfig(FrigateBaseModel):
        return v


class PtzAutotrackConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
    required_zones: List[str] = Field(
        default_factory=list,
        title="List of required zones to be entered in order to begin autotracking.",
    )
    return_preset: str = Field(
        default="home",
        title="Name of camera preset to return to when object tracking is over.",
    )
    timeout: int = Field(
        default=10, title="Seconds to delay before returning to preset."
    )


class OnvifConfig(FrigateBaseModel):
    host: str = Field(default="", title="Onvif Host")
    port: int = Field(default=8000, title="Onvif Port")
    user: Optional[str] = Field(title="Onvif Username")
    password: Optional[str] = Field(title="Onvif Password")
    autotracking: PtzAutotrackConfig = Field(
        default_factory=PtzAutotrackConfig,
        title="PTZ auto tracking config.",
    )


class RetainModeEnum(str, Enum):
@ -403,6 +423,9 @@ class AudioConfig(FrigateBaseModel):
    max_not_heard: int = Field(
        default=30, title="Seconds of not hearing the type of audio to end the event."
    )
    min_volume: int = Field(
        default=500, title="Min volume required to run audio detection."
    )
    listen: List[str] = Field(
        default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
    )
@ -902,6 +925,17 @@ def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
        )


def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
    """Verify that required_zones are specified when autotracking is enabled."""
    if (
        camera_config.onvif.autotracking.enabled
        and not camera_config.onvif.autotracking.required_zones
    ):
        raise ValueError(
            f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
        )


class FrigateConfig(FrigateBaseModel):
    mqtt: MqttConfig = Field(title="MQTT Configuration.")
    database: DatabaseConfig = Field(
@ -1077,6 +1111,7 @@ class FrigateConfig(FrigateBaseModel):
            verify_recording_retention(camera_config)
            verify_recording_segments_setup_with_reasonable_time(camera_config)
            verify_zone_objects_are_tracked(camera_config)
            verify_autotrack_zones(camera_config)

            if camera_config.rtmp.enabled:
                logger.warning(
@ -11,7 +11,6 @@ YAML_EXT = (".yaml", ".yml")
FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video"
BTBN_PATH = "/usr/lib/btbn-ffmpeg"

# Attributes

@ -47,3 +46,7 @@ DRIVER_INTEL_iHD = "iHD"

MAX_SEGMENT_DURATION = 600
MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times

# Queue Values

DEFAULT_QUEUE_BUFFER_SIZE = 2000 * 1000  # 2MB
@ -11,7 +11,7 @@ from pydantic import BaseModel, Extra, Field
from pydantic.fields import PrivateAttr

from frigate.plus import PlusApi
from frigate.util import load_labels
from frigate.util.builtin import load_labels

logger = logging.getLogger(__name__)

@ -27,14 +27,17 @@ class EdgeTpuTfl(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: EdgeTpuDetectorConfig):
        device_config = {"device": "usb"}
        device_config = {}
        if detector_config.device is not None:
            device_config = {"device": detector_config.device}

        edge_tpu_delegate = None

        try:
            logger.info(f"Attempting to load TPU as {device_config['device']}")
            device_type = (
                device_config["device"] if "device" in device_config else "auto"
            )
            logger.info(f"Attempting to load TPU as {device_type}")
            edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
            logger.info("TPU found")
            self.interpreter = Interpreter(
@ -78,7 +78,7 @@ class TensorRtDetector(DetectionApi):
        try:
            trt.init_libnvinfer_plugins(self.trt_logger, "")

            ctypes.cdll.LoadLibrary("/trt-models/libyolo_layer.so")
            ctypes.cdll.LoadLibrary("/usr/local/lib/libyolo_layer.so")
        except OSError as e:
            logger.error(
                "ERROR: failed to load libraries. %s",

@ -26,7 +26,8 @@ from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe
from frigate.object_detection import load_labels
from frigate.types import FeatureMetricsTypes
from frigate.util import get_ffmpeg_arg_list, listen
from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.services import listen
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg

try:
@ -168,6 +169,10 @@ class AudioEventMaintainer(threading.Thread):
        if not self.feature_metrics[self.config.name]["audio_enabled"].value:
            return

        rms = np.sqrt(np.mean(np.absolute(np.square(audio.astype(np.float32)))))

        # only run audio detection when volume is above min_volume
        if rms >= self.config.audio.min_volume:
            waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
            model_detections = self.detector.detect(waveform)
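The gate above compares the root-mean-square level of the raw audio chunk against `min_volume` (the 200/500/1000 thresholds from the reference config). A standalone illustration of the same arithmetic (the sample values are made up):

```python
import numpy as np

# A chunk of int16 PCM samples: quiet noise plus a louder burst
audio = np.array([40, -35, 900, -1100, 850, -60], dtype=np.int16)

# Root-mean-square volume, matching the expression used above
rms = np.sqrt(np.mean(np.absolute(np.square(audio.astype(np.float32)))))

print(int(rms))  # compared against min_volume (e.g. 500)
```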
@ -191,7 +196,7 @@ class AudioEventMaintainer(threading.Thread):
        )

        if resp.status_code == 200:
            event_id = resp.json()[0]["event_id"]
            event_id = resp.json()["event_id"]
            self.detections[label] = {
                "id": event_id,
                "label": label,

@ -14,7 +14,7 @@ from faster_fifo import Queue
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum
from frigate.util import draw_box_with_label
from frigate.util.image import draw_box_with_label

logger = logging.getLogger(__name__)

@ -57,8 +57,12 @@ class ExternalEventProcessor:
            "label": label,
            "sub_label": sub_label,
            "camera": camera,
            "start_time": now,
            "end_time": now + duration if duration is not None else None,
            "start_time": now - camera_config.record.events.pre_capture,
            "end_time": now
            + duration
            + camera_config.record.events.post_capture
            if duration is not None
            else None,
            "thumbnail": thumbnail,
            "has_clip": camera_config.record.enabled and include_recording,
            "has_snapshot": True,

@ -11,7 +11,7 @@ from faster_fifo import Queue
from frigate.config import EventsConfig, FrigateConfig
from frigate.models import Event
from frigate.types import CameraMetricsTypes
from frigate.util import to_relative_box
from frigate.util.builtin import to_relative_box

logger = logging.getLogger(__name__)
@ -199,7 +199,8 @@ class EventProcessor(threading.Thread):

        # only overwrite the sub_label in the database if it's set
        if event_data.get("sub_label") is not None:
            event[Event.sub_label] = event_data["sub_label"]
            event[Event.sub_label] = event_data["sub_label"][0]
            event[Event.data]["sub_label_score"] = event_data["sub_label"][1]

        (
            Event.insert(event)

@ -5,8 +5,7 @@ import os
from enum import Enum
from typing import Any

from frigate.const import BTBN_PATH
from frigate.util import vainfo_hwaccel
from frigate.util.services import vainfo_hwaccel
from frigate.version import VERSION

logger = logging.getLogger(__name__)
@ -43,7 +42,11 @@ class LibvaGpuSelector:
        return ""


TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"
TIMEOUT_PARAM = (
    "-timeout"
    if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59")) >= 59
    else "-stimeout"
)

_gpu_selector = LibvaGpuSelector()
_user_agent_args = [
@ -107,14 +110,14 @@ PRESETS_HW_ACCEL_DECODE = {
}

PRESETS_HW_ACCEL_SCALE = {
    "preset-rpi-32-h264": "-r {0} -s {1}x{2}",
    "preset-rpi-64-h264": "-r {0} -s {1}x{2}",
    "preset-rpi-32-h264": "-r {0} -vf fps={0},scale={1}:{2}",
    "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
    "preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
    "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
    "default": "-r {0} -s {1}x{2}",
    "default": "-r {0} -vf fps={0},scale={1}:{2}",
}

PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
frigate/http.py (122 lines changed)
@ -24,27 +24,27 @@ from flask import (
    make_response,
    request,
)
from peewee import DoesNotExist, SqliteDatabase, fn, operator
from peewee import DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict
from playhouse.sqliteq import SqliteQueueDatabase
from tzlocal import get_localzone_name

from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.const import CLIPS_DIR, CONFIG_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.events.external import ExternalEventProcessor
from frigate.models import Event, Recordings, Timeline
from frigate.object_processing import TrackedObject
from frigate.plus import PlusApi
from frigate.ptz import OnvifController
from frigate.ptz.onvif import OnvifController
from frigate.record.export import PlaybackFactorEnum, RecordingExporter
from frigate.stats import stats_snapshot
from frigate.storage import StorageMaintainer
from frigate.util import (
from frigate.util.builtin import (
    clean_camera_user_pass,
    ffprobe_stream,
    get_tz_modifiers,
    restart_frigate,
    vainfo_hwaccel,
    update_yaml_from_url,
)
from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel
from frigate.version import VERSION

logger = logging.getLogger(__name__)
@ -54,7 +54,7 @@ bp = Blueprint("frigate", __name__)

def create_app(
    frigate_config,
    database: SqliteDatabase,
    database: SqliteQueueDatabase,
    stats_tracking,
    detected_frames_processor,
    storage_maintainer: StorageMaintainer,

@ -420,8 +420,8 @@ def get_labels():
    else:
        events = Event.select(Event.label).distinct()
    except Exception as e:
        return jsonify(
            {"success": False, "message": f"Failed to get labels: {e}"}, "404"
        return make_response(
            jsonify({"success": False, "message": f"Failed to get labels: {e}"}), 404
        )

    labels = sorted([e.label for e in events])

@ -435,8 +435,9 @@ def get_sub_labels():
    try:
        events = Event.select(Event.sub_label).distinct()
    except Exception as e:
        return jsonify(
            {"success": False, "message": f"Failed to get sub_labels: {e}"}, "404"
        return make_response(
            jsonify({"success": False, "message": f"Failed to get sub_labels: {e}"}),
            404,
        )

    sub_labels = [e.sub_label for e in events]
@ -869,12 +870,17 @@ def events():
@bp.route("/events/<camera_name>/<label>/create", methods=["POST"])
def create_event(camera_name, label):
    if not camera_name or not current_app.frigate_config.cameras.get(camera_name):
        return jsonify(
            {"success": False, "message": f"{camera_name} is not a valid camera."}, 404
        return make_response(
            jsonify(
                {"success": False, "message": f"{camera_name} is not a valid camera."}
            ),
            404,
        )

    if not label:
        return jsonify({"success": False, "message": f"{label} must be set."}, 404)
        return make_response(
            jsonify({"success": False, "message": f"{label} must be set."}), 404
        )

    json: dict[str, any] = request.get_json(silent=True) or {}
@ -892,17 +898,19 @@ def create_event(camera_name, label):
            frame,
        )
    except Exception as e:
        logger.error(f"The error is {e}")
        return jsonify(
            {"success": False, "message": f"An unknown error occurred: {e}"}, 404
        return make_response(
            jsonify({"success": False, "message": f"An unknown error occurred: {e}"}),
            404,
        )

    return jsonify(
    return make_response(
        jsonify(
            {
                "success": True,
                "message": "Successfully created event.",
                "event_id": event_id,
            },
            }
        ),
        200,
    )

@ -915,11 +923,16 @@ def end_event(event_id):
        end_time = json.get("end_time", datetime.now().timestamp())
        current_app.external_processor.finish_manual_event(event_id, end_time)
    except Exception:
        return jsonify(
            {"success": False, "message": f"{event_id} must be set and valid."}, 404
        return make_response(
            jsonify(
                {"success": False, "message": f"{event_id} must be set and valid."}
            ),
            404,
        )

    return jsonify({"success": True, "message": "Event successfully ended."}, 200)
    return make_response(
        jsonify({"success": True, "message": "Event successfully ended."}), 200
    )


@bp.route("/config")
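For context, these handlers back the manual events API. A hedged usage sketch with `requests` (the host, `/api` prefix, camera, label, and the PUT method for ending an event are assumptions inferred from the handlers above):

```python
import requests

BASE = "http://frigate.local:5000/api"  # placeholder host

# Create a manual event on camera "front_door" with label "package"
resp = requests.post(f"{BASE}/events/front_door/package/create", json={})
event_id = resp.json()["event_id"]

# Later, end the event explicitly
requests.put(f"{BASE}/events/{event_id}/end", json={})
```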
@ -1030,6 +1043,48 @@ def config_save():
    return "Config successfully saved.", 200


@bp.route("/config/set", methods=["PUT"])
def config_set():
    config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")

    # Check if we can use .yaml instead of .yml
    config_file_yaml = config_file.replace(".yml", ".yaml")

    if os.path.isfile(config_file_yaml):
        config_file = config_file_yaml

    with open(config_file, "r") as f:
        old_raw_config = f.read()
        f.close()

    try:
        update_yaml_from_url(config_file, request.url)
        with open(config_file, "r") as f:
            new_raw_config = f.read()
            f.close()
        # Validate the config schema
        try:
            FrigateConfig.parse_raw(new_raw_config)
        except Exception:
            with open(config_file, "w") as f:
                f.write(old_raw_config)
                f.close()
            return make_response(
                jsonify(
                    {
                        "success": False,
                        "message": f"\nConfig Error:\n\n{str(traceback.format_exc())}",
                    }
                ),
                400,
            )
    except Exception as e:
        logging.error(f"Error updating config: {e}")
        return "Error updating config", 500

    return "Config successfully updated", 200


@bp.route("/config/schema.json")
def config_schema():
    return current_app.response_class(
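The new endpoint applies YAML keys passed as query parameters through `update_yaml_from_url`, validates the result, and rolls back on error. A hedged usage sketch (the host and the dotted query-key format are assumptions based on the handler above):

```python
import requests

# Toggle detection for one camera by rewriting that key in the config file;
# the change is rolled back automatically if validation fails (HTTP 400)
resp = requests.put(
    "http://frigate.local:5000/api/config/set",  # placeholder host
    params={"cameras.front_door.detect.enabled": "False"},
)
print(resp.status_code, resp.text)
```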
@ -1104,10 +1159,14 @@ def latest_frame(camera_name):
    frame = current_app.detected_frames_processor.get_current_frame(
        camera_name, draw_options
    )
    retry_interval = float(
        current_app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
        or 10
    )

    if frame is None or datetime.now().timestamp() > (
        current_app.detected_frames_processor.get_current_frame_time(camera_name)
        + 10
        + retry_interval
    ):
        if current_app.camera_error_image is None:
            error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
@ -1575,21 +1634,24 @@ def ffprobe():
    path_param = request.args.get("paths", "")

    if not path_param:
        return jsonify(
            {"success": False, "message": "Path needs to be provided."}, "404"
        return make_response(
            jsonify({"success": False, "message": "Path needs to be provided."}), 404
        )

    if path_param.startswith("camera"):
        camera = path_param[7:]

        if camera not in current_app.frigate_config.cameras.keys():
            return jsonify(
                {"success": False, "message": f"{camera} is not a valid camera."}, "404"
            return make_response(
                jsonify(
                    {"success": False, "message": f"{camera} is not a valid camera."}
                ),
                404,
            )

        if not current_app.frigate_config.cameras[camera].enabled:
            return jsonify(
                {"success": False, "message": f"{camera} is not enabled."}, "404"
            return make_response(
                jsonify({"success": False, "message": f"{camera} is not enabled."}), 404
            )

    paths = map(
@ -13,7 +13,7 @@ from typing import Deque, Optional
from faster_fifo import Queue
from setproctitle import setproctitle

from frigate.util import clean_camera_user_pass
from frigate.util.builtin import clean_camera_user_pass


def listener_configurer() -> None:

@ -38,7 +38,9 @@ class Event(Model): # type: ignore[misc]
        IntegerField()
    )  # TODO remove when columns can be dropped without rebuilding table
    retain_indefinitely = BooleanField(default=False)
    ratio = FloatField(default=1.0)
    ratio = FloatField(
        default=1.0
    )  # TODO remove when columns can be dropped without rebuilding table
    plus_id = CharField(max_length=30)
    model_hash = CharField(max_length=32)
    detector_type = CharField(max_length=32)
@ -7,12 +7,15 @@ import signal
import threading
from abc import ABC, abstractmethod

import faster_fifo as ff
import numpy as np
from setproctitle import setproctitle

from frigate.detectors import create_detector
from frigate.detectors.detector_config import InputTensorEnum
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels
from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.services import listen

logger = logging.getLogger(__name__)

@ -72,7 +75,7 @@ class LocalObjectDetector(ObjectDetector):

def run_detector(
    name: str,
    detection_queue: mp.Queue,
    detection_queue: ff.Queue,
    out_events: dict[str, mp.Event],
    avg_speed,
    start,
@ -22,7 +22,8 @@ from frigate.config import (
)
from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum
from frigate.util import (
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.util.image import (
    SharedMemoryFrameManager,
    area,
    calculate_region,
@ -111,7 +112,7 @@ class TrackedObject:
        self.zone_presence = {}
        self.current_zones = []
        self.entered_zones = []
        self.attributes = set()
        self.attributes = defaultdict(float)
        self.false_positive = True
        self.has_clip = False
        self.has_snapshot = False
@ -143,6 +144,7 @@ class TrackedObject:
    def update(self, current_frame_time, obj_data):
        thumb_update = False
        significant_change = False
        autotracker_update = False
        # if the object is not in the current frame, add a 0.0 to the score history
        if obj_data["frame_time"] != current_frame_time:
            self.score_history.append(0.0)
@ -205,15 +207,19 @@ class TrackedObject:

        # maintain attributes
        for attr in obj_data["attributes"]:
            self.attributes.add(attr["label"])
            if self.attributes[attr["label"]] < attr["score"]:
                self.attributes[attr["label"]] = attr["score"]

        # populate the sub_label for car with first logo if it exists
        if self.obj_data["label"] == "car" and "sub_label" not in self.obj_data:
            recognized_logos = self.attributes.intersection(
                set(["ups", "fedex", "amazon"])
            )
        # populate the sub_label for car with highest scoring logo
        if self.obj_data["label"] == "car":
            recognized_logos = {
                k: self.attributes[k]
                for k in ["ups", "fedex", "amazon"]
                if k in self.attributes
            }
            if len(recognized_logos) > 0:
                self.obj_data["sub_label"] = recognized_logos.pop()
                max_logo = max(recognized_logos, key=recognized_logos.get)
                self.obj_data["sub_label"] = (max_logo, recognized_logos[max_logo])

        # check for significant change
        if not self.false_positive:
@ -236,9 +242,15 @@ class TrackedObject:
        if self.obj_data["frame_time"] - self.previous["frame_time"] > 60:
            significant_change = True

        # update autotrack at half fps
        if self.obj_data["frame_time"] - self.previous["frame_time"] > (
            1 / (self.camera_config.detect.fps / 2)
        ):
            autotracker_update = True

        self.obj_data.update(obj_data)
        self.current_zones = current_zones
        return (thumb_update, significant_change)
        return (thumb_update, significant_change, autotracker_update)

    def to_dict(self, include_thumbnail: bool = False):
        (self.thumbnail_data["frame_time"] if self.thumbnail_data is not None else 0.0)
@ -266,7 +278,7 @@ class TrackedObject:
            "entered_zones": self.entered_zones.copy(),
            "has_clip": self.has_clip,
            "has_snapshot": self.has_snapshot,
            "attributes": list(self.attributes),
            "attributes": self.attributes,
            "current_attributes": self.obj_data["attributes"],
        }
@ -437,7 +449,11 @@ def zone_filtered(obj: TrackedObject, object_config):
# Maintains the state of a camera
class CameraState:
    def __init__(
        self, name, config: FrigateConfig, frame_manager: SharedMemoryFrameManager
        self,
        name,
        config: FrigateConfig,
        frame_manager: SharedMemoryFrameManager,
        ptz_autotracker_thread: PtzAutoTrackerThread,
    ):
        self.name = name
        self.config = config

@ -455,6 +471,7 @@ class CameraState:
        self.regions = []
        self.previous_frame_id = None
        self.callbacks = defaultdict(list)
        self.ptz_autotracker_thread = ptz_autotracker_thread

    def get_current_frame(self, draw_options={}):
        with self.current_frame_lock:
@ -476,6 +493,21 @@ class CameraState:
            thickness = 1
            color = (255, 0, 0)

            # draw thicker box around ptz autotracked object
            if (
                self.camera_config.onvif.autotracking.enabled
                and self.ptz_autotracker_thread.ptz_autotracker.tracked_object[
                    self.name
                ]
                is not None
                and obj["id"]
                == self.ptz_autotracker_thread.ptz_autotracker.tracked_object[
                    self.name
                ].obj_data["id"]
            ):
                thickness = 5
                color = self.config.model.colormap[obj["label"]]

            # draw the bounding boxes on the frame
            box = obj["box"]
            draw_box_with_label(
@ -589,10 +621,14 @@ class CameraState:

        for id in updated_ids:
            updated_obj = tracked_objects[id]
            thumb_update, significant_update = updated_obj.update(
            thumb_update, significant_update, autotracker_update = updated_obj.update(
                frame_time, current_detections[id]
            )

            if autotracker_update or significant_update:
                for c in self.callbacks["autotrack"]:
                    c(self.name, updated_obj, frame_time)

            if thumb_update:
                # ensure this frame is stored in the cache
                if (
@ -733,6 +769,7 @@ class TrackedObjectProcessor(threading.Thread):
        event_processed_queue,
        video_output_queue,
        recordings_info_queue,
        ptz_autotracker_thread,
        stop_event,
    ):
        threading.Thread.__init__(self)

@ -748,6 +785,7 @@ class TrackedObjectProcessor(threading.Thread):
        self.camera_states: dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()
        self.last_motion_detected: dict[str, float] = {}
        self.ptz_autotracker_thread = ptz_autotracker_thread

        def start(camera, obj: TrackedObject, current_frame_time):
            self.event_queue.put(

@ -774,6 +812,9 @@ class TrackedObjectProcessor(threading.Thread):
                )
            )

        def autotrack(camera, obj: TrackedObject, current_frame_time):
            self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)

        def end(camera, obj: TrackedObject, current_frame_time):
            # populate has_snapshot
            obj.has_snapshot = self.should_save_snapshot(camera, obj)

@ -822,6 +863,7 @@ class TrackedObjectProcessor(threading.Thread):
                "type": "end",
            }
            self.dispatcher.publish("events", json.dumps(message), retain=False)
            self.ptz_autotracker_thread.ptz_autotracker.end_object(camera, obj)

            self.event_queue.put(
                (

@ -858,8 +900,11 @@ class TrackedObjectProcessor(threading.Thread):
            self.dispatcher.publish(f"{camera}/{object_name}", status, retain=False)

        for camera in self.config.cameras.keys():
            camera_state = CameraState(camera, self.config, self.frame_manager)
            camera_state = CameraState(
                camera, self.config, self.frame_manager, self.ptz_autotracker_thread
            )
            camera_state.on("start", start)
            camera_state.on("autotrack", autotrack)
            camera_state.on("update", update)
            camera_state.on("end", end)
            camera_state.on("snapshot", snapshot)
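CameraState dispatches through per-event callback lists (the callbacks defaultdict registered above), so the new autotrack hook is just one more subscriber. A reduced sketch of that pattern with illustrative names; the emit helper is an abstraction, the real code iterates self.callbacks[...] inline:

from collections import defaultdict

class CallbackHub:
    def __init__(self):
        self.callbacks = defaultdict(list)

    def on(self, event_type: str, callback):
        # subscribers are appended; all of them fire for a given event
        self.callbacks[event_type].append(callback)

    def emit(self, event_type: str, *args):
        for c in self.callbacks[event_type]:
            c(*args)

hub = CallbackHub()
hub.on("autotrack", lambda cam, obj, t: print(f"autotrack {cam} at {t}"))
hub.emit("autotrack", "front", object(), 1234.5)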
@ -24,11 +24,70 @@ from ws4py.websocket import WebSocket

from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.util import SharedMemoryFrameManager, copy_yuv_to_position, get_yuv_crop
from frigate.util.image import (
    SharedMemoryFrameManager,
    copy_yuv_to_position,
    get_yuv_crop,
)

logger = logging.getLogger(__name__)


def get_standard_aspect_ratio(width, height) -> tuple[int, int]:
    """Ensure that only standard aspect ratios are used."""
    known_aspects = [
        (16, 9),
        (9, 16),
        (32, 9),
        (12, 9),
        (9, 12),
    ]  # aspects are scaled to have common relative size
    known_aspects_ratios = list(
        map(lambda aspect: aspect[0] / aspect[1], known_aspects)
    )
    closest = min(
        known_aspects_ratios,
        key=lambda x: abs(x - (width / height)),
    )
    return known_aspects[known_aspects_ratios.index(closest)]
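A worked example of get_standard_aspect_ratio as defined above: any camera ratio snaps to the nearest known aspect, so a slightly-off 1.9 ratio still lands on (16, 9) and 4:3 maps to the 16:9-relative (12, 9):

known_aspects = [(16, 9), (9, 16), (32, 9), (12, 9), (9, 12)]

def get_standard_aspect_ratio(width, height):
    ratios = [a[0] / a[1] for a in known_aspects]
    closest = min(ratios, key=lambda x: abs(x - (width / height)))
    return known_aspects[ratios.index(closest)]

print(get_standard_aspect_ratio(16, 9))   # (16, 9)
print(get_standard_aspect_ratio(19, 10))  # (16, 9) -- 1.9 is closest to 1.78
print(get_standard_aspect_ratio(4, 3))    # (12, 9) -- 4:3 at 16:9-relative size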
class Canvas:
    def __init__(self, canvas_width: int, canvas_height: int) -> None:
        gcd = math.gcd(canvas_width, canvas_height)
        self.aspect = get_standard_aspect_ratio(
            (canvas_width / gcd), (canvas_height / gcd)
        )
        self.width = canvas_width
        self.height = (self.width * self.aspect[1]) / self.aspect[0]
        self.coefficient_cache: dict[int, int] = {}
        self.aspect_cache: dict[str, tuple[int, int]] = {}

    def get_aspect(self, coefficient: int) -> tuple[int, int]:
        return (self.aspect[0] * coefficient, self.aspect[1] * coefficient)

    def get_coefficient(self, camera_count: int) -> int:
        return self.coefficient_cache.get(camera_count, 2)

    def set_coefficient(self, camera_count: int, coefficient: int) -> None:
        self.coefficient_cache[camera_count] = coefficient

    def get_camera_aspect(
        self, cam_name: str, camera_width: int, camera_height: int
    ) -> tuple[int, int]:
        cached = self.aspect_cache.get(cam_name)

        if cached:
            return cached

        gcd = math.gcd(camera_width, camera_height)
        camera_aspect = get_standard_aspect_ratio(
            camera_width / gcd, camera_height / gcd
        )
        self.aspect_cache[cam_name] = camera_aspect
        return camera_aspect
class FFMpegConverter:
    def __init__(
        self,

@ -170,6 +229,7 @@ class BirdsEyeFrameManager:
        self.frame_shape = (height, width)
        self.yuv_shape = (height * 3 // 2, width)
        self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
        self.canvas = Canvas(width, height)
        self.stop_event = stop_event

        # initialize the frame as black and with the Frigate logo
@ -318,16 +378,15 @@ class BirdsEyeFrameManager:
            ),
        )

        canvas_width = self.config.birdseye.width
        canvas_height = self.config.birdseye.height

        if len(active_cameras) == 1:
            # show single camera as fullscreen
            camera = active_cameras_to_add[0]
            camera_dims = self.cameras[camera]["dimensions"].copy()
            scaled_width = int(canvas_height * camera_dims[0] / camera_dims[1])
            scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
            coefficient = (
                1 if scaled_width <= canvas_width else canvas_width / scaled_width
                1
                if scaled_width <= self.canvas.width
                else self.canvas.width / scaled_width
            )
            self.camera_layout = [
                [

@ -337,14 +396,14 @@ class BirdsEyeFrameManager:
                            0,
                            0,
                            int(scaled_width * coefficient),
                            int(canvas_height * coefficient),
                            int(self.canvas.height * coefficient),
                        ),
                    )
                ]
            ]
        else:
            # calculate optimal layout
            coefficient = 2
            coefficient = self.canvas.get_coefficient(len(active_cameras))
            calculating = True

            # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
@ -353,7 +412,6 @@ class BirdsEyeFrameManager:
                    return

                layout_candidate = self.calculate_layout(
                    (canvas_width, canvas_height),
                    active_cameras_to_add,
                    coefficient,
                )

@ -367,6 +425,7 @@ class BirdsEyeFrameManager:
                    return

                calculating = False
                self.canvas.set_coefficient(len(active_cameras), coefficient)

            self.camera_layout = layout_candidate

@ -378,9 +437,7 @@ class BirdsEyeFrameManager:

        return True

    def calculate_layout(
        self, canvas, cameras_to_add: list[str], coefficient
    ) -> tuple[any]:
    def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]:
        """Calculate the optimal layout for 2+ cameras."""

        def map_layout(row_height: int):
@ -397,23 +454,20 @@ class BirdsEyeFrameManager:
            x = starting_x
            for cameras in row:
                camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
                camera_aspect = cameras[1]

                if camera_dims[1] > camera_dims[0]:
                    scaled_height = int(row_height * 2)
                    scaled_width = int(
                        scaled_height * camera_dims[0] / camera_dims[1]
                    )
                    scaled_width = int(scaled_height * camera_aspect)
                    starting_x = scaled_width
                else:
                    scaled_height = row_height
                    scaled_width = int(
                        scaled_height * camera_dims[0] / camera_dims[1]
                    )
                    scaled_width = int(scaled_height * camera_aspect)

                # layout is too large
                if (
                    x + scaled_width > canvas_width
                    or y + scaled_height > canvas_height
                    x + scaled_width > self.canvas.width
                    or y + scaled_height > self.canvas.height
                ):
                    return 0, 0, None
@ -425,13 +479,9 @@ class BirdsEyeFrameManager:

            return max_width, y, candidate_layout

        canvas_width = canvas[0]
        canvas_height = canvas[1]
        canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient)
        camera_layout: list[list[any]] = []
        camera_layout.append([])
        canvas_gcd = math.gcd(canvas[0], canvas[1])
        canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
        canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
        starting_x = 0
        x = starting_x
        y = 0

@ -439,18 +489,9 @@
        max_y = 0
        for camera in cameras_to_add:
            camera_dims = self.cameras[camera]["dimensions"].copy()
            camera_gcd = math.gcd(camera_dims[0], camera_dims[1])
            camera_aspect_x = camera_dims[0] / camera_gcd
            camera_aspect_y = camera_dims[1] / camera_gcd

            if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
                # account for slightly off 16:9 cameras
                camera_aspect_x = 16
                camera_aspect_y = 9
            elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
                # make 4:3 cameras the same relative size as 16:9
                camera_aspect_x = 12
                camera_aspect_y = 9
            camera_aspect_x, camera_aspect_y = self.canvas.get_camera_aspect(
                camera, camera_dims[0], camera_dims[1]
            )

            if camera_dims[1] > camera_dims[0]:
                portrait = True

@ -462,10 +503,7 @@ class BirdsEyeFrameManager:
                camera_layout[y_i].append(
                    (
                        camera,
                        (
                            camera_aspect_x,
                            camera_aspect_y,
                        ),
                        camera_aspect_x / camera_aspect_y,
                    )
                )

@ -491,7 +529,7 @@ class BirdsEyeFrameManager:
                camera_layout[y_i].append(
                    (
                        camera,
                        (camera_aspect_x, camera_aspect_y),
                        camera_aspect_x / camera_aspect_y,
                    )
                )
                x += camera_aspect_x

@ -499,15 +537,16 @@ class BirdsEyeFrameManager:
        if y + max_y > canvas_aspect_y:
            return None

        row_height = int(canvas_height / coefficient)
        row_height = int(self.canvas.height / coefficient)
        total_width, total_height, standard_candidate_layout = map_layout(row_height)

        # layout can't be optimized more
        if total_width / canvas_width >= 0.99:
        if total_width / self.canvas.width >= 0.99:
            return standard_candidate_layout

        scale_up_percent = min(
            1 - (total_width / canvas_width), 1 - (total_height / canvas_height)
            1 - (total_width / self.canvas.width),
            1 - (total_height / self.canvas.height),
        )
        row_height = int(row_height * (1 + round(scale_up_percent, 1)))
        _, _, scaled_layout = map_layout(row_height)
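The scale-up step above can be checked with simple numbers. Suppose the mapped layout fills 75% of the canvas width and 80% of its height; the row height grows by the smaller headroom, rounded to one decimal (the numbers are illustrative):

canvas_width, canvas_height = 1280, 720
total_width, total_height = 960, 576  # 75% and 80% of the canvas
row_height = 180

scale_up_percent = min(
    1 - (total_width / canvas_width),    # 0.25
    1 - (total_height / canvas_height),  # 0.20
)
row_height = int(row_height * (1 + round(scale_up_percent, 1)))
print(row_height)  # 216 = 180 * 1.2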
370 frigate/ptz/autotrack.py Normal file
@ -0,0 +1,370 @@
"""Automatically pan, tilt, and zoom on detected objects via onvif."""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
import queue
|
||||
import threading
|
||||
import time
|
||||
from functools import partial
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from norfair.camera_motion import MotionEstimator, TranslationTransformationGetter
|
||||
|
||||
from frigate.config import CameraConfig, FrigateConfig
|
||||
from frigate.ptz.onvif import OnvifController
|
||||
from frigate.types import CameraMetricsTypes
|
||||
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PtzMotionEstimator:
|
||||
def __init__(self, config: CameraConfig, ptz_stopped) -> None:
|
||||
self.frame_manager = SharedMemoryFrameManager()
|
||||
# homography is nice (zooming) but slow, translation is pan/tilt only but fast.
|
||||
self.norfair_motion_estimator = MotionEstimator(
|
||||
transformations_getter=TranslationTransformationGetter(),
|
||||
min_distance=30,
|
||||
max_points=500,
|
||||
)
|
||||
self.camera_config = config
|
||||
self.coord_transformations = None
|
||||
self.ptz_stopped = ptz_stopped
|
||||
logger.debug(f"Motion estimator init for cam: {config.name}")
|
||||
|
||||
def motion_estimator(self, detections, frame_time, camera_name):
|
||||
if (
|
||||
self.camera_config.onvif.autotracking.enabled
|
||||
and not self.ptz_stopped.is_set()
|
||||
):
|
||||
logger.debug(
|
||||
f"Motion estimator running for {camera_name} - frame time: {frame_time}"
|
||||
)
|
||||
|
||||
frame_id = f"{camera_name}{frame_time}"
|
||||
yuv_frame = self.frame_manager.get(
|
||||
frame_id, self.camera_config.frame_shape_yuv
|
||||
)
|
||||
|
||||
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2GRAY_I420)
|
||||
|
||||
# mask out detections for better motion estimation
|
||||
mask = np.ones(frame.shape[:2], frame.dtype)
|
||||
|
||||
detection_boxes = [x[2] for x in detections]
|
||||
for detection in detection_boxes:
|
||||
x1, y1, x2, y2 = detection
|
||||
mask[y1:y2, x1:x2] = 0
|
||||
|
||||
# merge camera config motion mask with detections. Norfair function needs 0,1 mask
|
||||
mask = np.bitwise_and(mask, self.camera_config.motion.mask).clip(max=1)
|
||||
|
||||
# Norfair estimator function needs color so it can convert it right back to gray
|
||||
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGRA)
|
||||
|
||||
self.coord_transformations = self.norfair_motion_estimator.update(
|
||||
frame, mask
|
||||
)
|
||||
|
||||
self.frame_manager.close(frame_id)
|
||||
|
||||
logger.debug(
|
||||
f"Motion estimator transformation: {self.coord_transformations.rel_to_abs((0,0))}"
|
||||
)
|
||||
|
||||
return self.coord_transformations
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class PtzAutoTrackerThread(threading.Thread):
    def __init__(
        self,
        config: FrigateConfig,
        onvif: OnvifController,
        camera_metrics: dict[str, CameraMetricsTypes],
        stop_event: MpEvent,
    ) -> None:
        threading.Thread.__init__(self)
        self.name = "ptz_autotracker"
        self.ptz_autotracker = PtzAutoTracker(config, onvif, camera_metrics)
        self.stop_event = stop_event
        self.config = config

    def run(self):
        while not self.stop_event.is_set():
            for camera_name, cam in self.config.cameras.items():
                if cam.onvif.autotracking.enabled:
                    self.ptz_autotracker.camera_maintenance(camera_name)
                else:
                    # disabled dynamically by mqtt
                    if self.ptz_autotracker.tracked_object.get(camera_name):
                        self.ptz_autotracker.tracked_object[camera_name] = None
                        self.ptz_autotracker.tracked_object_previous[camera_name] = None
            time.sleep(1)
        logger.info("Exiting autotracker...")

class PtzAutoTracker:
    def __init__(
        self,
        config: FrigateConfig,
        onvif: OnvifController,
        camera_metrics: CameraMetricsTypes,
    ) -> None:
        self.config = config
        self.onvif = onvif
        self.camera_metrics = camera_metrics
        self.tracked_object: dict[str, object] = {}
        self.tracked_object_previous: dict[str, object] = {}
        self.object_types = {}
        self.required_zones = {}
        self.move_queues = {}
        self.move_threads = {}
        self.autotracker_init = {}

        # if cam is set to autotrack, onvif should be set up
        for camera_name, cam in self.config.cameras.items():
            self.autotracker_init[camera_name] = False
            if cam.onvif.autotracking.enabled:
                self._autotracker_setup(cam, camera_name)

    def _autotracker_setup(self, cam, camera_name):
        logger.debug(f"Autotracker init for cam: {camera_name}")

        self.object_types[camera_name] = cam.onvif.autotracking.track
        self.required_zones[camera_name] = cam.onvif.autotracking.required_zones

        self.tracked_object[camera_name] = None
        self.tracked_object_previous[camera_name] = None

        self.move_queues[camera_name] = queue.Queue()

        if not self.onvif.cams[camera_name]["init"]:
            if not self.onvif._init_onvif(camera_name):
                logger.warning(f"Unable to initialize onvif for {camera_name}")
                cam.onvif.autotracking.enabled = False
                self.camera_metrics[camera_name][
                    "ptz_autotracker_enabled"
                ].value = False

                return

            if not self.onvif.cams[camera_name]["relative_fov_supported"]:
                cam.onvif.autotracking.enabled = False
                self.camera_metrics[camera_name][
                    "ptz_autotracker_enabled"
                ].value = False
                logger.warning(
                    f"Disabling autotracking for {camera_name}: FOV relative movement not supported"
                )

                return

        # movement thread per camera
        if not self.move_threads or not self.move_threads[camera_name]:
            self.move_threads[camera_name] = threading.Thread(
                name=f"move_thread_{camera_name}",
                target=partial(self._process_move_queue, camera_name),
            )
            self.move_threads[camera_name].daemon = True
            self.move_threads[camera_name].start()

        self.autotracker_init[camera_name] = True
    def _process_move_queue(self, camera):
        while True:
            try:
                if self.move_queues[camera].qsize() > 1:
                    # Accumulate values since last moved
                    pan = 0
                    tilt = 0

                    while not self.move_queues[camera].empty():
                        queued_pan, queued_tilt = self.move_queues[camera].queue[0]

                        # If exceeding the movement range, keep it in the queue and move now
                        if abs(pan + queued_pan) > 1.0 or abs(tilt + queued_tilt) > 1.0:
                            logger.debug("Pan or tilt value exceeds 1.0")
                            break

                        queued_pan, queued_tilt = self.move_queues[camera].get()

                        pan += queued_pan
                        tilt += queued_tilt
                else:
                    move_data = self.move_queues[camera].get()
                    pan, tilt = move_data

                # check if ptz is moving
                self.onvif.get_camera_status(camera)

                # Wait until the camera finishes moving
                self.camera_metrics[camera]["ptz_stopped"].wait()

                self.onvif._move_relative(camera, pan, tilt, 1)

                # Wait until the camera finishes moving
                while not self.camera_metrics[camera]["ptz_stopped"].is_set():
                    # check if ptz is moving
                    self.onvif.get_camera_status(camera)
                    time.sleep(1 / (self.config.cameras[camera].detect.fps / 2))

            except queue.Empty:
                time.sleep(0.1)

    def _enqueue_move(self, camera, pan, tilt):
        move_data = (pan, tilt)
        logger.debug(f"enqueue pan: {pan}, enqueue tilt: {tilt}")
        self.move_queues[camera].put(move_data)

    def _autotrack_move_ptz(self, camera, obj):
        camera_config = self.config.cameras[camera]

        # frame width and height
        camera_width = camera_config.frame_shape[1]
        camera_height = camera_config.frame_shape[0]

        # Normalize coordinates. top right of the fov is (1,1).
        pan = 0.5 - (obj.obj_data["centroid"][0] / camera_width)
        tilt = 0.5 - (obj.obj_data["centroid"][1] / camera_height)

        # ideas: check object velocity for camera speed?
        self._enqueue_move(camera, -pan, tilt)
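A worked example of the centroid normalization in _autotrack_move_ptz: for a 1280x720 frame, an object centered at (960, 180) yields pan = 0.5 - 960/1280 = -0.25 and tilt = 0.5 - 180/720 = 0.25, and the enqueued move negates pan so the camera pans right toward the object:

camera_width, camera_height = 1280, 720
centroid = (960, 180)  # object in the upper-right quadrant

pan = 0.5 - (centroid[0] / camera_width)    # -0.25
tilt = 0.5 - (centroid[1] / camera_height)  # 0.25

# the move is enqueued as (-pan, tilt), i.e. (0.25, 0.25)
print((-pan, tilt))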
    def autotrack_object(self, camera, obj):
        camera_config = self.config.cameras[camera]

        if (
            camera_config.onvif.autotracking.enabled
            and self.camera_metrics[camera]["ptz_stopped"].is_set()
        ):
            # either this is a brand new object that's on our camera, has our label, entered the zone, is not a false positive,
            # and is not initially motionless - or one we're already tracking, which assumes all those things are already true
            if (
                # new object
                self.tracked_object[camera] is None
                and obj.camera == camera
                and obj.obj_data["label"] in self.object_types[camera]
                and set(obj.entered_zones) & set(self.required_zones[camera])
                and not obj.previous["false_positive"]
                and not obj.false_positive
                and self.tracked_object_previous[camera] is None
                and obj.obj_data["motionless_count"] == 0
            ):
                logger.debug(
                    f"Autotrack: New object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                )
                self.tracked_object[camera] = obj
                self.tracked_object_previous[camera] = copy.deepcopy(obj)
                self._autotrack_move_ptz(camera, obj)

                return

            if (
                # already tracking an object
                self.tracked_object[camera] is not None
                and self.tracked_object_previous[camera] is not None
                and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"]
                and obj.obj_data["frame_time"]
                != self.tracked_object_previous[camera].obj_data["frame_time"]
            ):
                # don't move the ptz if we're relatively close to the existing box
                # should we use iou or euclidean distance or both?
                # distance = math.sqrt((obj.obj_data["centroid"][0] - camera_width/2)**2 + (obj.obj_data["centroid"][1] - obj.camera_height/2)**2)
                # if distance <= (self.camera_width * .15) or distance <= (self.camera_height * .15)
                if (
                    intersection_over_union(
                        self.tracked_object_previous[camera].obj_data["box"],
                        obj.obj_data["box"],
                    )
                    > 0.5
                ):
                    logger.debug(
                        f"Autotrack: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                    )
                    self.tracked_object_previous[camera] = copy.deepcopy(obj)
                    return

                logger.debug(
                    f"Autotrack: Existing object (move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                )
                self.tracked_object_previous[camera] = copy.deepcopy(obj)
                self._autotrack_move_ptz(camera, obj)

                return

            if (
                # The tracker lost an object, so let's check the previous object's region and compare it with the incoming object
                # If it's within bounds, start tracking that object.
                # Should we check region (maybe too broad) or expand the previous object's box a bit and check that?
                self.tracked_object[camera] is None
                and obj.camera == camera
                and obj.obj_data["label"] in self.object_types[camera]
                and not obj.previous["false_positive"]
                and not obj.false_positive
                and obj.obj_data["motionless_count"] == 0
                and self.tracked_object_previous[camera] is not None
            ):
                if (
                    intersection_over_union(
                        self.tracked_object_previous[camera].obj_data["region"],
                        obj.obj_data["box"],
                    )
                    < 0.2
                ):
                    logger.debug(
                        f"Autotrack: Reacquired object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                    )
                    self.tracked_object[camera] = obj
                    self.tracked_object_previous[camera] = copy.deepcopy(obj)
                    self._autotrack_move_ptz(camera, obj)

                return
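Both branches above gate on intersection_over_union from frigate.util.image, whose body is not part of this diff. The following is a standard IOU over (x1, y1, x2, y2) boxes, shown only as an assumption of what that helper computes:

def intersection_over_union(box_a, box_b) -> float:
    # overlap rectangle, empty if the boxes are disjoint
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    if inter == 0:
        return 0.0
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter)

# > 0.5 against the previous box means "close enough, don't move the ptz";
# < 0.2 against the previous region marks a candidate for reacquisition.
print(intersection_over_union((0, 0, 10, 10), (5, 5, 15, 15)))  # ~0.143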
    def end_object(self, camera, obj):
        if self.config.cameras[camera].onvif.autotracking.enabled:
            if (
                self.tracked_object[camera] is not None
                and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"]
            ):
                logger.debug(
                    f"Autotrack: End object: {obj.obj_data['id']} {obj.obj_data['box']}"
                )
                self.tracked_object[camera] = None
                self.onvif.get_camera_status(camera)

    def camera_maintenance(self, camera):
        # calls get_camera_status to check/update ptz movement
        # returns camera to preset after timeout when tracking is over
        autotracker_config = self.config.cameras[camera].onvif.autotracking

        if not self.autotracker_init[camera]:
            self._autotracker_setup(self.config.cameras[camera], camera)
        # regularly update camera status
        if not self.camera_metrics[camera]["ptz_stopped"].is_set():
            self.onvif.get_camera_status(camera)

        # return to preset if tracking is over
        if (
            self.tracked_object[camera] is None
            and self.tracked_object_previous[camera] is not None
            and (
                # might want to use a different timestamp here?
                time.time()
                - self.tracked_object_previous[camera].obj_data["frame_time"]
                > autotracker_config.timeout
            )
            and autotracker_config.return_preset
        ):
            self.camera_metrics[camera]["ptz_stopped"].wait()
            logger.debug(
                f"Autotrack: Time is {time.time()}, returning to preset: {autotracker_config.return_preset}"
            )
            self.onvif._move_to_preset(
                camera,
                autotracker_config.return_preset.lower(),
            )
            self.tracked_object_previous[camera] = None
@ -4,9 +4,11 @@ import logging
import site
from enum import Enum

import numpy
from onvif import ONVIFCamera, ONVIFError

from frigate.config import FrigateConfig
from frigate.types import CameraMetricsTypes

logger = logging.getLogger(__name__)

@ -26,8 +28,11 @@ class OnvifCommandEnum(str, Enum):


class OnvifController:
    def __init__(self, config: FrigateConfig) -> None:
    def __init__(
        self, config: FrigateConfig, camera_metrics: dict[str, CameraMetricsTypes]
    ) -> None:
        self.cams: dict[str, ONVIFCamera] = {}
        self.camera_metrics = camera_metrics

        for cam_name, cam in config.cameras.items():
            if not cam.enabled:
@ -68,12 +73,51 @@ class OnvifController:
        ptz = onvif.create_ptz_service()
        request = ptz.create_type("GetConfigurationOptions")
        request.ConfigurationToken = profile.PTZConfiguration.token
        ptz_config = ptz.GetConfigurationOptions(request)

        # setup moving request
        fov_space_id = next(
            (
                i
                for i, space in enumerate(
                    ptz_config.Spaces.RelativePanTiltTranslationSpace
                )
                if "TranslationSpaceFov" in space["URI"]
            ),
            None,
        )

        # setup continuous moving request
        move_request = ptz.create_type("ContinuousMove")
        move_request.ProfileToken = profile.token
        self.cams[camera_name]["move_request"] = move_request

        # setup relative moving request for autotracking
        move_request = ptz.create_type("RelativeMove")
        move_request.ProfileToken = profile.token
        if move_request.Translation is None and fov_space_id is not None:
            move_request.Translation = ptz.GetStatus(
                {"ProfileToken": profile.token}
            ).Position
            move_request.Translation.PanTilt.space = ptz_config["Spaces"][
                "RelativePanTiltTranslationSpace"
            ][fov_space_id]["URI"]
            move_request.Translation.Zoom.space = ptz_config["Spaces"][
                "RelativeZoomTranslationSpace"
            ][0]["URI"]
        if move_request.Speed is None:
            move_request.Speed = ptz.GetStatus({"ProfileToken": profile.token}).Position
        self.cams[camera_name]["relative_move_request"] = move_request

        # setup absolute moving request for autotracking
        move_request = ptz.create_type("AbsoluteMove")
        move_request.ProfileToken = profile.token
        self.cams[camera_name]["absolute_move_request"] = move_request

        # status request for autotracking
        status_request = ptz.create_type("GetStatus")
        status_request.ProfileToken = profile.token
        self.cams[camera_name]["status_request"] = status_request

        # setup existing presets
        try:
            presets: list[dict] = ptz.GetPresets({"ProfileToken": profile.token})

@ -94,6 +138,20 @@ class OnvifController:
        if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace:
            supported_features.append("zoom")

        if ptz_config.Spaces and ptz_config.Spaces.RelativePanTiltTranslationSpace:
            supported_features.append("pt-r")

        if ptz_config.Spaces and ptz_config.Spaces.RelativeZoomTranslationSpace:
            supported_features.append("zoom-r")

        if fov_space_id is not None:
            supported_features.append("pt-r-fov")
            self.cams[camera_name][
                "relative_fov_range"
            ] = ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id]

        self.cams[camera_name]["relative_fov_supported"] = fov_space_id is not None

        self.cams[camera_name]["features"] = supported_features

        self.cams[camera_name]["init"] = True
@ -143,12 +201,67 @@ class OnvifController:

        onvif.get_service("ptz").ContinuousMove(move_request)

    def _move_relative(self, camera_name: str, pan, tilt, speed) -> None:
        if not self.cams[camera_name]["relative_fov_supported"]:
            logger.error(f"{camera_name} does not support ONVIF RelativeMove (FOV).")
            return

        logger.debug(f"{camera_name} called RelativeMove: pan: {pan} tilt: {tilt}")
        self.get_camera_status(camera_name)

        if self.cams[camera_name]["active"]:
            logger.warning(
                f"{camera_name} is already performing an action, not moving..."
            )
            return

        self.cams[camera_name]["active"] = True
        self.camera_metrics[camera_name]["ptz_stopped"].clear()
        onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
        move_request = self.cams[camera_name]["relative_move_request"]

        # function takes in -1 to 1 for pan and tilt, interpolate to the values of the camera.
        # The onvif spec says this can report as +INF and -INF, so this may need to be modified
        pan = numpy.interp(
            pan,
            [-1, 1],
            [
                self.cams[camera_name]["relative_fov_range"]["XRange"]["Min"],
                self.cams[camera_name]["relative_fov_range"]["XRange"]["Max"],
            ],
        )
        tilt = numpy.interp(
            tilt,
            [-1, 1],
            [
                self.cams[camera_name]["relative_fov_range"]["YRange"]["Min"],
                self.cams[camera_name]["relative_fov_range"]["YRange"]["Max"],
            ],
        )

        move_request.Speed = {
            "PanTilt": {
                "x": speed,
                "y": speed,
            },
            "Zoom": 0,
        }

        move_request.Translation.PanTilt.x = pan
        move_request.Translation.PanTilt.y = tilt
        move_request.Translation.Zoom.x = 0

        onvif.get_service("ptz").RelativeMove(move_request)

        self.cams[camera_name]["active"] = False

    def _move_to_preset(self, camera_name: str, preset: str) -> None:
        if preset not in self.cams[camera_name]["presets"]:
            logger.error(f"{preset} is not a valid preset for {camera_name}")
            return

        self.cams[camera_name]["active"] = True
        self.camera_metrics[camera_name]["ptz_stopped"].clear()
        move_request = self.cams[camera_name]["move_request"]
        onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
        preset_token = self.cams[camera_name]["presets"][preset]

@ -158,6 +271,7 @@ class OnvifController:
                "PresetToken": preset_token,
            }
        )
        self.camera_metrics[camera_name]["ptz_stopped"].set()
        self.cams[camera_name]["active"] = False

    def _zoom(self, camera_name: str, command: OnvifCommandEnum) -> None:
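The interpolation above maps the normalized [-1, 1] request onto whatever FOV translation range the camera reports. A worked example with an assumed camera-reported range of -0.5..0.5:

import numpy

x_range = {"Min": -0.5, "Max": 0.5}  # assumed camera-reported FOV range

pan = numpy.interp(0.25, [-1, 1], [x_range["Min"], x_range["Max"]])
print(pan)  # 0.125 -- a quarter of the way right becomes an eighth of the range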
@ -216,3 +330,30 @@ class OnvifController:
            "features": self.cams[camera_name]["features"],
            "presets": list(self.cams[camera_name]["presets"].keys()),
        }

    def get_camera_status(self, camera_name: str) -> dict[str, any]:
        if camera_name not in self.cams.keys():
            logger.error(f"Onvif is not setup for {camera_name}")
            return {}

        if not self.cams[camera_name]["init"]:
            self._init_onvif(camera_name)

        onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
        status_request = self.cams[camera_name]["status_request"]
        status = onvif.get_service("ptz").GetStatus(status_request)

        if status.MoveStatus.PanTilt == "IDLE" and status.MoveStatus.Zoom == "IDLE":
            self.cams[camera_name]["active"] = False
            self.camera_metrics[camera_name]["ptz_stopped"].set()
        else:
            self.cams[camera_name]["active"] = True
            self.camera_metrics[camera_name]["ptz_stopped"].clear()

        return {
            "pan": status.Position.PanTilt.x,
            "tilt": status.Position.PanTilt.y,
            "zoom": status.Position.Zoom.x,
            "pantilt_moving": status.MoveStatus.PanTilt,
            "zoom_moving": status.MoveStatus.Zoom,
        }
@ -3,7 +3,6 @@
import asyncio
import datetime
import logging
import multiprocessing as mp
import os
import queue
import random

@ -15,13 +14,15 @@ from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
from typing import Any, Tuple

import faster_fifo as ff
import psutil

from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.types import FeatureMetricsTypes
from frigate.util import area, get_video_properties
from frigate.util.image import area
from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)

@ -30,7 +31,7 @@ class RecordingMaintainer(threading.Thread):
    def __init__(
        self,
        config: FrigateConfig,
        recordings_info_queue: mp.Queue,
        recordings_info_queue: ff.Queue,
        process_info: dict[str, FeatureMetricsTypes],
        stop_event: MpEvent,
    ):
@ -7,6 +7,7 @@ import threading
from types import FrameType
from typing import Optional

import faster_fifo as ff
from playhouse.sqliteq import SqliteQueueDatabase
from setproctitle import setproctitle

@ -15,14 +16,14 @@ from frigate.models import Event, Recordings, RecordingsToDelete, Timeline
from frigate.record.cleanup import RecordingCleanup
from frigate.record.maintainer import RecordingMaintainer
from frigate.types import FeatureMetricsTypes
from frigate.util import listen
from frigate.util.services import listen

logger = logging.getLogger(__name__)


def manage_recordings(
    config: FrigateConfig,
    recordings_info_queue: mp.Queue,
    recordings_info_queue: ff.Queue,
    process_info: dict[str, FeatureMetricsTypes],
) -> None:
    stop_event = mp.Event()
@ -17,7 +17,7 @@ from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, DRIVER_AMD, DRIVER_ENV_VAR, RECORD_DIR
from frigate.object_detection import ObjectDetectProcess
from frigate.types import CameraMetricsTypes, StatsTrackingTypes
from frigate.util import (
from frigate.util.services import (
    get_amd_gpu_stats,
    get_bandwidth_stats,
    get_cpu_stats,

@ -2,7 +2,7 @@

import unittest

from frigate.util import clean_camera_user_pass, escape_special_characters
from frigate.util.builtin import clean_camera_user_pass, escape_special_characters


class TestUserPassCleanup(unittest.TestCase):

@ -9,7 +9,7 @@ from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors import DetectorTypeEnum
from frigate.plus import PlusApi
from frigate.util import deep_merge, load_config_with_no_duplicates
from frigate.util.builtin import deep_merge, load_config_with_no_duplicates


class TestConfig(unittest.TestCase):

@ -3,7 +3,7 @@ from unittest import TestCase, main
import cv2
import numpy as np

from frigate.util import copy_yuv_to_position, get_yuv_crop
from frigate.util.image import copy_yuv_to_position, get_yuv_crop


class TestCopyYuvToPosition(TestCase):

@ -1,7 +1,7 @@
import unittest
from unittest.mock import MagicMock, patch

from frigate.util import get_amd_gpu_stats, get_intel_gpu_stats
from frigate.util.services import get_amd_gpu_stats, get_intel_gpu_stats


class TestGpuStats(unittest.TestCase):

@ -5,7 +5,7 @@ import numpy as np
from norfair.drawing.color import Palette
from norfair.drawing.drawer import Drawer

from frigate.util import intersection
from frigate.util.image import intersection
from frigate.video import (
    get_cluster_boundary,
    get_cluster_candidates,

@ -3,7 +3,7 @@ from unittest import TestCase, main
import cv2
import numpy as np

from frigate.util import yuv_region_2_rgb
from frigate.util.image import yuv_region_2_rgb


class TestYuvRegion2RGB(TestCase):

@ -10,7 +10,7 @@ from faster_fifo import Queue
from frigate.config import FrigateConfig
from frigate.events.maintainer import EventTypeEnum
from frigate.models import Timeline
from frigate.util import to_relative_box
from frigate.util.builtin import to_relative_box

logger = logging.getLogger(__name__)
@ -5,9 +5,10 @@ import numpy as np
from norfair import Detection, Drawable, Tracker, draw_boxes
from norfair.drawing.drawer import Drawer

from frigate.config import DetectConfig
from frigate.config import CameraConfig
from frigate.ptz.autotrack import PtzMotionEstimator
from frigate.track import ObjectTracker
from frigate.util import intersection_over_union
from frigate.util.image import intersection_over_union


# Normalizes distance from estimate relative to object size

@ -54,12 +55,16 @@ def frigate_distance(detection: Detection, tracked_object) -> float:


class NorfairTracker(ObjectTracker):
    def __init__(self, config: DetectConfig):
    def __init__(self, config: CameraConfig, ptz_autotracker_enabled, ptz_stopped):
        self.tracked_objects = {}
        self.disappeared = {}
        self.positions = {}
        self.max_disappeared = config.max_disappeared
        self.detect_config = config
        self.max_disappeared = config.detect.max_disappeared
        self.camera_config = config
        self.detect_config = config.detect
        self.ptz_autotracker_enabled = ptz_autotracker_enabled.value
        self.ptz_stopped = ptz_stopped
        self.camera_name = config.name
        self.track_id_map = {}
        # TODO: could also initialize a tracker per object class if there
        # was a good reason to have different distance calculations

@ -69,6 +74,8 @@ class NorfairTracker(ObjectTracker):
            initialization_delay=0,
            hit_counter_max=self.max_disappeared,
        )
        if self.ptz_autotracker_enabled:
            self.ptz_motion_estimator = PtzMotionEstimator(config, self.ptz_stopped)

    def register(self, track_id, obj):
        rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))

@ -230,7 +237,16 @@ class NorfairTracker(ObjectTracker):
            )
        )

        tracked_objects = self.tracker.update(detections=norfair_detections)
        coord_transformations = None

        if self.ptz_autotracker_enabled:
            coord_transformations = self.ptz_motion_estimator.motion_estimator(
                detections, frame_time, self.camera_name
            )

        tracked_objects = self.tracker.update(
            detections=norfair_detections, coord_transformations=coord_transformations
        )

        # update or create new tracks
        active_ids = []
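The tracker change wires norfair's camera-motion compensation into the update call. A minimal sketch of that API, assuming norfair's documented MotionEstimator and Tracker interfaces; the frame and detection here are placeholders:

import numpy as np
from norfair import Detection, Tracker
from norfair.camera_motion import MotionEstimator, TranslationTransformationGetter

tracker = Tracker(distance_function="euclidean", distance_threshold=50)
motion_estimator = MotionEstimator(
    transformations_getter=TranslationTransformationGetter()
)

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # placeholder camera frame
coord_transformations = motion_estimator.update(frame)

detections = [Detection(points=np.array([[640, 360]]))]
# passing coord_transformations lets the tracker subtract camera motion
# from object motion, which matters while the ptz itself is moving
tracked = tracker.update(
    detections=detections, coord_transformations=coord_transformations
)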
@ -1,5 +1,6 @@
from multiprocessing.context import Process
from multiprocessing.sharedctypes import Synchronized
from multiprocessing.synchronize import Event
from typing import Optional, TypedDict

from faster_fifo import Queue

@ -17,6 +18,8 @@ class CameraMetricsTypes(TypedDict):
    frame_queue: Queue
    motion_enabled: Synchronized
    improve_contrast_enabled: Synchronized
    ptz_autotracker_enabled: Synchronized
    ptz_stopped: Event
    motion_threshold: Synchronized
    motion_contour_area: Synchronized
    process: Optional[Process]
251 frigate/util/builtin.py Normal file
@ -0,0 +1,251 @@
"""Utilities for builtin types manipulation."""
|
||||
|
||||
import copy
|
||||
import datetime
|
||||
import logging
|
||||
import re
|
||||
import shlex
|
||||
import urllib.parse
|
||||
from collections import Counter
|
||||
from collections.abc import Mapping
|
||||
from typing import Any, Tuple
|
||||
|
||||
import numpy as np
|
||||
import pytz
|
||||
import yaml
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EventsPerSecond:
|
||||
def __init__(self, max_events=1000, last_n_seconds=10):
|
||||
self._start = None
|
||||
self._max_events = max_events
|
||||
self._last_n_seconds = last_n_seconds
|
||||
self._timestamps = []
|
||||
|
||||
def start(self):
|
||||
self._start = datetime.datetime.now().timestamp()
|
||||
|
||||
def update(self):
|
||||
now = datetime.datetime.now().timestamp()
|
||||
if self._start is None:
|
||||
self._start = now
|
||||
self._timestamps.append(now)
|
||||
# truncate the list when it goes 100 over the max_size
|
||||
if len(self._timestamps) > self._max_events + 100:
|
||||
self._timestamps = self._timestamps[(1 - self._max_events) :]
|
||||
self.expire_timestamps(now)
|
||||
|
||||
def eps(self):
|
||||
now = datetime.datetime.now().timestamp()
|
||||
if self._start is None:
|
||||
self._start = now
|
||||
# compute the (approximate) events in the last n seconds
|
||||
self.expire_timestamps(now)
|
||||
seconds = min(now - self._start, self._last_n_seconds)
|
||||
# avoid divide by zero
|
||||
if seconds == 0:
|
||||
seconds = 1
|
||||
return len(self._timestamps) / seconds
|
||||
|
||||
# remove aged out timestamps
|
||||
def expire_timestamps(self, now):
|
||||
threshold = now - self._last_n_seconds
|
||||
while self._timestamps and self._timestamps[0] < threshold:
|
||||
del self._timestamps[0]
|
||||
|
||||
|
||||
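A small usage sketch of EventsPerSecond as defined above: call update() once per event and read eps() for the rolling rate over the last_n_seconds window; the loop here is illustrative:

import time

fps_counter = EventsPerSecond(last_n_seconds=10)
fps_counter.start()

for _ in range(5):
    fps_counter.update()  # one call per processed frame/event
    time.sleep(0.1)

print(f"{fps_counter.eps():.1f} events/sec")  # ~10 while events keep arriving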
def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
    """
    :param dct1: First dict to merge
    :param dct2: Second dict to merge
    :param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True)
    :return: The merge dictionary
    """
    merged = copy.deepcopy(dct1)
    for k, v2 in dct2.items():
        if k in merged:
            v1 = merged[k]
            if isinstance(v1, dict) and isinstance(v2, Mapping):
                merged[k] = deep_merge(v1, v2, override)
            elif isinstance(v1, list) and isinstance(v2, list):
                if merge_lists:
                    merged[k] = v1 + v2
            else:
                if override:
                    merged[k] = copy.deepcopy(v2)
        else:
            merged[k] = copy.deepcopy(v2)
    return merged


def load_config_with_no_duplicates(raw_config) -> dict:
    """Get config ensuring duplicate keys are not allowed."""

    # https://stackoverflow.com/a/71751051
    class PreserveDuplicatesLoader(yaml.loader.Loader):
        pass

    def map_constructor(loader, node, deep=False):
        keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
        vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
        key_count = Counter(keys)
        data = {}
        for key, val in zip(keys, vals):
            if key_count[key] > 1:
                raise ValueError(
                    f"Config input {key} is defined multiple times for the same field, this is not allowed."
                )
            else:
                data[key] = val
        return data

    PreserveDuplicatesLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
    )
    return yaml.load(raw_config, PreserveDuplicatesLoader)
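A quick example of deep_merge's semantics as written above: nested dicts merge recursively, a scalar present in both sides keeps dct1's value unless override is set, and lists only combine with merge_lists (the config keys are illustrative):

defaults = {"detect": {"width": 1280, "height": 720}, "zones": ["porch"]}
overrides = {"detect": {"width": 1920}, "zones": ["yard"]}

print(deep_merge(defaults, overrides))
# {'detect': {'width': 1280, 'height': 720}, 'zones': ['porch']}

print(deep_merge(defaults, overrides, override=True))
# {'detect': {'width': 1920, 'height': 720}, 'zones': ['porch']}

print(deep_merge(defaults, overrides, override=True, merge_lists=True))
# {'detect': {'width': 1920, 'height': 720}, 'zones': ['porch', 'yard']}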
def clean_camera_user_pass(line: str) -> str:
    """Removes user and password from line."""
    if "rtsp://" in line:
        return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
    else:
        return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)


def escape_special_characters(path: str) -> str:
    """Cleans reserved characters to encodings for ffmpeg."""
    try:
        found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
        pw = found[(found.index(":") + 1) :]
        return path.replace(pw, urllib.parse.quote_plus(pw))
    except AttributeError:
        # path does not have user:pass
        return path


def get_ffmpeg_arg_list(arg: Any) -> list:
    """Use arg if list or convert to list format."""
    return arg if isinstance(arg, list) else shlex.split(arg)


def load_labels(path, encoding="utf-8"):
    """Loads labels from file (with or without index numbers).
    Args:
        path: path to label file.
        encoding: label file encoding.
    Returns:
        Dictionary mapping indices to labels.
    """
    with open(path, "r", encoding=encoding) as f:
        labels = {index: "unknown" for index in range(91)}
        lines = f.readlines()
        if not lines:
            return {}

        if lines[0].split(" ", maxsplit=1)[0].isdigit():
            pairs = [line.split(" ", maxsplit=1) for line in lines]
            labels.update({int(index): label.strip() for index, label in pairs})
        else:
            labels.update({index: line.strip() for index, line in enumerate(lines)})
    return labels
def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
    seconds_offset = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    hours_offset = int(seconds_offset / 60 / 60)
    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
    hour_modifier = f"{hours_offset} hour"
    minute_modifier = f"{minutes_offset} minute"
    return hour_modifier, minute_modifier


def to_relative_box(
    width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int, int, int, int]:
    return (
        box[0] / width,  # x
        box[1] / height,  # y
        (box[2] - box[0]) / width,  # w
        (box[3] - box[1]) / height,  # h
    )


def create_mask(frame_shape, mask):
    mask_img = np.zeros(frame_shape, np.uint8)
    mask_img[:] = 255
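A worked example of to_relative_box: an absolute (x1, y1, x2, y2) box of (320, 180, 640, 540) in a 1280x720 frame becomes a normalized (x, y, w, h) tuple:

# x = 320/1280, y = 180/720, w = (640-320)/1280, h = (540-180)/720
print(to_relative_box(1280, 720, (320, 180, 640, 540)))
# (0.25, 0.25, 0.25, 0.5)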
def update_yaml_from_url(file_path, url):
    parsed_url = urllib.parse.urlparse(url)
    query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)

    for key_path_str, new_value_list in query_string.items():
        key_path = key_path_str.split(".")
        for i in range(len(key_path)):
            try:
                index = int(key_path[i])
                key_path[i] = (key_path[i - 1], index)
                key_path.pop(i - 1)
            except ValueError:
                pass
        new_value = new_value_list[0]
        update_yaml_file(file_path, key_path, new_value)


def update_yaml_file(file_path, key_path, new_value):
    yaml = YAML()
    with open(file_path, "r") as f:
        data = yaml.load(f)

    data = update_yaml(data, key_path, new_value)

    with open(file_path, "w") as f:
        yaml.dump(data, f)


def update_yaml(data, key_path, new_value):
    temp = data
    for key in key_path[:-1]:
        if isinstance(key, tuple):
            if key[0] not in temp:
                temp[key[0]] = [{}] * max(1, key[1] + 1)
            elif len(temp[key[0]]) <= key[1]:
                temp[key[0]] += [{}] * (key[1] - len(temp[key[0]]) + 1)
            temp = temp[key[0]][key[1]]
        else:
            if key not in temp:
                temp[key] = {}
            temp = temp[key]

    last_key = key_path[-1]
    if new_value == "":
        if isinstance(last_key, tuple):
            del temp[last_key[0]][last_key[1]]
        else:
            del temp[last_key]
    else:
        if isinstance(last_key, tuple):
            if last_key[0] not in temp:
                temp[last_key[0]] = [{}] * max(1, last_key[1] + 1)
            elif len(temp[last_key[0]]) <= last_key[1]:
                temp[last_key[0]] += [{}] * (last_key[1] - len(temp[last_key[0]]) + 1)
            temp[last_key[0]][last_key[1]] = new_value
        else:
            if (
                last_key in temp
                and isinstance(temp[last_key], dict)
                and isinstance(new_value, dict)
            ):
                temp[last_key].update(new_value)
            else:
                temp[last_key] = new_value

    return data
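To illustrate the key-path handling above: a dotted query key like cameras.back.detect.width targets nested mappings, while a numeric segment such as inputs.0 is folded into a (key, index) tuple that addresses a list element (the config keys here are illustrative):

data = {}
update_yaml(data, ["cameras", "back", "detect", "width"], 1920)
print(data)
# {'cameras': {'back': {'detect': {'width': 1920}}}}

# ("inputs", 0) creates/extends a list and writes into element 0
update_yaml(data, ["cameras", "back", ("inputs", 0), "path"], "rtsp://example")
print(data["cameras"]["back"]["inputs"])
# [{'path': 'rtsp://example'}]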
602 frigate/util.py → frigate/util/image.py Executable file → Normal file
@ -1,83 +1,17 @@
import copy
"""Utilities for creating and manipulating image frames."""

import datetime
import json
import logging
import os
import re
import shlex
import signal
import subprocess as sp
import traceback
import urllib.parse
from abc import ABC, abstractmethod
from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory
from typing import Any, AnyStr, Optional, Tuple
from typing import AnyStr, Optional

import cv2
import numpy as np
import psutil
import py3nvml.py3nvml as nvml
import pytz
import yaml

from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS

logger = logging.getLogger(__name__)


def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
    """
    :param dct1: First dict to merge
    :param dct2: Second dict to merge
    :param override: if same key exists in both dictionaries, should override? otherwise ignore. (default=True)
    :return: The merge dictionary
    """
    merged = copy.deepcopy(dct1)
    for k, v2 in dct2.items():
        if k in merged:
            v1 = merged[k]
            if isinstance(v1, dict) and isinstance(v2, Mapping):
                merged[k] = deep_merge(v1, v2, override)
            elif isinstance(v1, list) and isinstance(v2, list):
                if merge_lists:
                    merged[k] = v1 + v2
            else:
                if override:
                    merged[k] = copy.deepcopy(v2)
        else:
            merged[k] = copy.deepcopy(v2)
    return merged


def load_config_with_no_duplicates(raw_config) -> dict:
    """Get config ensuring duplicate keys are not allowed."""

    # https://stackoverflow.com/a/71751051
    class PreserveDuplicatesLoader(yaml.loader.Loader):
        pass

    def map_constructor(loader, node, deep=False):
        keys = [loader.construct_object(node, deep=deep) for node, _ in node.value]
        vals = [loader.construct_object(node, deep=deep) for _, node in node.value]
        key_count = Counter(keys)
        data = {}
        for key, val in zip(keys, vals):
            if key_count[key] > 1:
                raise ValueError(
                    f"Config input {key} is defined multiple times for the same field, this is not allowed."
                )
            else:
                data[key] = val
        return data

    PreserveDuplicatesLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, map_constructor
    )
    return yaml.load(raw_config, PreserveDuplicatesLoader)


def draw_timestamp(
    frame,
    timestamp,
@ -639,432 +573,6 @@ def clipped(obj, frame_shape):
    return False


def restart_frigate():
    proc = psutil.Process(1)
    # if this is running via s6, sigterm pid 1
    if proc.name() == "s6-svscan":
        proc.terminate()
    # otherwise, just try and exit frigate
    else:
        os.kill(os.getpid(), signal.SIGTERM)


class EventsPerSecond:
    def __init__(self, max_events=1000, last_n_seconds=10):
        self._start = None
        self._max_events = max_events
        self._last_n_seconds = last_n_seconds
        self._timestamps = []

    def start(self):
        self._start = datetime.datetime.now().timestamp()

    def update(self):
        now = datetime.datetime.now().timestamp()
        if self._start is None:
            self._start = now
        self._timestamps.append(now)
        # truncate the list when it goes 100 over the max_size
        if len(self._timestamps) > self._max_events + 100:
            self._timestamps = self._timestamps[(1 - self._max_events) :]
        self.expire_timestamps(now)

    def eps(self):
        now = datetime.datetime.now().timestamp()
        if self._start is None:
            self._start = now
        # compute the (approximate) events in the last n seconds
        self.expire_timestamps(now)
        seconds = min(now - self._start, self._last_n_seconds)
        # avoid divide by zero
        if seconds == 0:
            seconds = 1
        return len(self._timestamps) / seconds

    # remove aged out timestamps
    def expire_timestamps(self, now):
        threshold = now - self._last_n_seconds
        while self._timestamps and self._timestamps[0] < threshold:
            del self._timestamps[0]


def print_stack(sig, frame):
    traceback.print_stack(frame)


def listen():
    signal.signal(signal.SIGUSR1, print_stack)
def create_mask(frame_shape, mask):
    mask_img = np.zeros(frame_shape, np.uint8)
    mask_img[:] = 255

    if isinstance(mask, list):
        for m in mask:
            add_mask(m, mask_img)

    elif isinstance(mask, str):
        add_mask(mask, mask_img)

    return mask_img


def add_mask(mask, mask_img):
    points = mask.split(",")
    contour = np.array(
        [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
    )
    cv2.fillPoly(mask_img, pts=[contour], color=(0))


def load_labels(path, encoding="utf-8"):
    """Loads labels from file (with or without index numbers).
    Args:
        path: path to label file.
        encoding: label file encoding.
    Returns:
        Dictionary mapping indices to labels.
    """
    with open(path, "r", encoding=encoding) as f:
        labels = {index: "unknown" for index in range(91)}
        lines = f.readlines()
        if not lines:
            return {}

        if lines[0].split(" ", maxsplit=1)[0].isdigit():
            pairs = [line.split(" ", maxsplit=1) for line in lines]
            labels.update({int(index): label.strip() for index, label in pairs})
        else:
            labels.update({index: line.strip() for index, line in enumerate(lines)})
        return labels

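The mask argument above is a flat comma-separated string of x,y pairs (illustrative sketch with hypothetical coordinates for a 640x480 frame):

frame_shape = (480, 640)
rectangle = "0,0,100,0,100,100,0,100"  # four corners of a 100x100 square
mask_img = create_mask(frame_shape, rectangle)
# mask_img is 255 everywhere except the polygon, which is filled with 0
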
def clean_camera_user_pass(line: str) -> str:
    """Removes user and password from line."""
    if "rtsp://" in line:
        return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
    else:
        return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)


def escape_special_characters(path: str) -> str:
    """Cleans reserved characters to encodings for ffmpeg."""
    try:
        found = re.search(REGEX_RTSP_CAMERA_USER_PASS, path).group(0)[3:-1]
        pw = found[(found.index(":") + 1) :]
        return path.replace(pw, urllib.parse.quote_plus(pw))
    except AttributeError:
        # path does not have user:pass
        return path

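For example, a reserved character in the password portion would get percent-encoded while the rest of the URL is left alone (hypothetical credentials, assuming REGEX_RTSP_CAMERA_USER_PASS matches the ://user:pass@ segment):

escape_special_characters("rtsp://admin:pa#ss@192.168.0.10:554/stream")
# -> "rtsp://admin:pa%23ss@192.168.0.10:554/stream"
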
def get_cgroups_version() -> str:
    """Determine what version of cgroups is enabled."""

    cgroup_path = "/sys/fs/cgroup"

    if not os.path.ismount(cgroup_path):
        logger.debug(f"{cgroup_path} is not a mount point.")
        return "unknown"

    try:
        with open("/proc/mounts", "r") as f:
            mounts = f.readlines()

        for mount in mounts:
            mount_info = mount.split()
            if mount_info[1] == cgroup_path:
                fs_type = mount_info[2]
                if fs_type == "cgroup2fs" or fs_type == "cgroup2":
                    return "cgroup2"
                elif fs_type == "tmpfs":
                    return "cgroup"
                else:
                    logger.debug(
                        f"Could not determine cgroups version: unhandled filesystem {fs_type}"
                    )
                break
    except Exception as e:
        logger.debug(f"Could not determine cgroups version: {e}")

    return "unknown"


def get_docker_memlimit_bytes() -> int:
    """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""

    # check running a supported cgroups version
    if get_cgroups_version() == "cgroup2":
        memlimit_path = "/sys/fs/cgroup/memory.max"

        try:
            with open(memlimit_path, "r") as f:
                value = f.read().strip()

                if value.isnumeric():
                    return int(value)
                elif value.lower() == "max":
                    return -1
        except Exception as e:
            logger.debug(f"Unable to get docker memlimit: {e}")

    return -1

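For reference, under cgroup v2 the memory.max file holds either a byte count or the literal string max; a tiny sketch of the two cases the function above distinguishes:

for raw in ("4294967296", "max"):  # hypothetical file contents
    print(int(raw) if raw.isnumeric() else -1)  # 4294967296, then -1
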
def get_cpu_stats() -> dict[str, dict]:
    """Get cpu usages for each process id"""
    usages = {}
    docker_memlimit = get_docker_memlimit_bytes() / 1024
    total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024

    for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
        pid = process.info["pid"]
        try:
            cpu_percent = process.info["cpu_percent"]
            cmdline = process.info["cmdline"]

            with open(f"/proc/{pid}/stat", "r") as f:
                stats = f.readline().split()
            utime = int(stats[13])
            stime = int(stats[14])
            starttime = int(stats[21])

            with open("/proc/uptime") as f:
                system_uptime_sec = int(float(f.read().split()[0]))

            clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])

            process_utime_sec = utime // clk_tck
            process_stime_sec = stime // clk_tck
            process_starttime_sec = starttime // clk_tck

            process_elapsed_sec = system_uptime_sec - process_starttime_sec
            process_usage_sec = process_utime_sec + process_stime_sec
            cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec

            with open(f"/proc/{pid}/statm", "r") as f:
                mem_stats = f.readline().split()
                mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024

            if docker_memlimit > 0:
                mem_pct = round((mem_res / docker_memlimit) * 100, 1)
            else:
                mem_pct = round((mem_res / total_mem) * 100, 1)

            usages[pid] = {
                "cpu": str(cpu_percent),
                "cpu_average": str(round(cpu_average_usage, 2)),
                "mem": f"{mem_pct}",
                "cmdline": " ".join(cmdline),
            }
        except Exception:
            continue

    return usages

def get_physical_interfaces(interfaces) -> list:
    with open("/proc/net/dev", "r") as file:
        lines = file.readlines()

    physical_interfaces = []
    for line in lines:
        if ":" in line:
            interface = line.split(":")[0].strip()
            for int in interfaces:
                if interface.startswith(int):
                    physical_interfaces.append(interface)

    return physical_interfaces


def get_bandwidth_stats(config) -> dict[str, dict]:
    """Get bandwidth usages for each ffmpeg process id"""
    usages = {}
    top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
        config.telemetry.network_interfaces
    )

    p = sp.run(
        top_command,
        encoding="ascii",
        capture_output=True,
    )

    if p.returncode != 0:
        return usages
    else:
        lines = p.stdout.split("\n")
        for line in lines:
            stats = list(filter(lambda a: a != "", line.strip().split("\t")))
            try:
                if re.search(
                    r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
                ):
                    process = stats[0].split("/")
                    usages[process[len(process) - 2]] = {
                        "bandwidth": round(float(stats[1]) + float(stats[2]), 1),
                    }
            except (IndexError, ValueError):
                continue

    return usages

def get_amd_gpu_stats() -> dict[str, str]:
    """Get stats using radeontop."""
    radeontop_command = ["radeontop", "-d", "-", "-l", "1"]

    p = sp.run(
        radeontop_command,
        encoding="ascii",
        capture_output=True,
    )

    if p.returncode != 0:
        logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
        return None
    else:
        usages = p.stdout.split(",")
        results: dict[str, str] = {}

        for hw in usages:
            if "gpu" in hw:
                results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
            elif "vram" in hw:
                results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"

        return results


def get_intel_gpu_stats() -> dict[str, str]:
    """Get stats using intel_gpu_top."""
    intel_gpu_top_command = [
        "timeout",
        "0.5s",
        "intel_gpu_top",
        "-J",
        "-o",
        "-",
        "-s",
        "1",
    ]

    p = sp.run(
        intel_gpu_top_command,
        encoding="ascii",
        capture_output=True,
    )

    # timeout has a non-zero returncode when timeout is reached
    if p.returncode != 124:
        logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
        return None
    else:
        reading = "".join(p.stdout.split())
        results: dict[str, str] = {}

        # render is used for qsv
        render = []
        for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
            packet = json.loads(result[14:])
            single = packet.get("busy", 0.0)
            render.append(float(single))

        if render:
            render_avg = sum(render) / len(render)
        else:
            render_avg = 1

        # video is used for vaapi
        video = []
        for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading):
            packet = json.loads(result[10:])
            single = packet.get("busy", 0.0)
            video.append(float(single))

        if video:
            video_avg = sum(video) / len(video)
        else:
            video_avg = 1

        results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
        results["mem"] = "-%"
        return results

def try_get_info(f, h, default="N/A"):
    try:
        v = f(h)
    except nvml.NVMLError_NotSupported:
        v = default
    return v


def get_nvidia_gpu_stats() -> dict[int, dict]:
    results = {}
    try:
        nvml.nvmlInit()
        deviceCount = nvml.nvmlDeviceGetCount()
        for i in range(deviceCount):
            handle = nvml.nvmlDeviceGetHandleByIndex(i)
            meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
            util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
            if util != "N/A":
                gpu_util = util.gpu
            else:
                gpu_util = 0

            if meminfo != "N/A":
                gpu_mem_util = meminfo.used / meminfo.total * 100
            else:
                gpu_mem_util = -1

            results[i] = {
                "name": nvml.nvmlDeviceGetName(handle),
                "gpu": gpu_util,
                "mem": gpu_mem_util,
            }
    except Exception:
        pass
    finally:
        return results

def ffprobe_stream(path: str) -> sp.CompletedProcess:
    """Run ffprobe on stream."""
    clean_path = escape_special_characters(path)
    ffprobe_cmd = [
        "ffprobe",
        "-timeout",
        "1000000",
        "-print_format",
        "json",
        "-show_entries",
        "stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
        "-loglevel",
        "quiet",
        clean_path,
    ]
    return sp.run(ffprobe_cmd, capture_output=True)


def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
    """Run vainfo."""
    ffprobe_cmd = (
        ["vainfo"]
        if not device_name
        else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
    )
    return sp.run(ffprobe_cmd, capture_output=True)


def get_ffmpeg_arg_list(arg: Any) -> list:
    """Use arg if list or convert to list format."""
    return arg if isinstance(arg, list) else shlex.split(arg)

class FrameManager(ABC):
    @abstractmethod
    def create(self, name, size) -> AnyStr:

@@ -1132,89 +640,23 @@ class SharedMemoryFrameManager(FrameManager):
        del self.shm_store[name]

def get_tz_modifiers(tz_name: str) -> Tuple[str, str]:
    seconds_offset = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    hours_offset = int(seconds_offset / 60 / 60)
    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
    hour_modifier = f"{hours_offset} hour"
    minute_modifier = f"{minutes_offset} minute"
    return hour_modifier, minute_modifier


def create_mask(frame_shape, mask):
    mask_img = np.zeros(frame_shape, np.uint8)
    mask_img[:] = 255

    if isinstance(mask, list):
        for m in mask:
            add_mask(m, mask_img)

    elif isinstance(mask, str):
        add_mask(mask, mask_img)

    return mask_img


def add_mask(mask, mask_img):
    points = mask.split(",")
    contour = np.array(
        [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
    )
    cv2.fillPoly(mask_img, pts=[contour], color=(0))


def to_relative_box(
    width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int, int, int, int]:
    return (
        box[0] / width,  # x
        box[1] / height,  # y
        (box[2] - box[0]) / width,  # w
        (box[3] - box[1]) / height,  # h
    )


def get_video_properties(url, get_duration=False):
    def calculate_duration(video: Optional[any]) -> float:
        duration = None

        if video is not None:
            # Get the frames per second (fps) of the video stream
            fps = video.get(cv2.CAP_PROP_FPS)
            total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

            if fps and total_frames:
                duration = total_frames / fps

        # if cv2 failed need to use ffprobe
        if duration is None:
            ffprobe_cmd = [
                "ffprobe",
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                f"{url}",
            ]
            p = sp.run(ffprobe_cmd, capture_output=True)

            if p.returncode == 0 and p.stdout.decode():
                duration = float(p.stdout.decode().strip())
            else:
                duration = -1

        return duration

    width = height = 0

    try:
        # Open the video stream
        video = cv2.VideoCapture(url)

        # Check if the video stream was opened successfully
        if not video.isOpened():
            video = None
    except Exception:
        video = None

    result = {}

    if get_duration:
        result["duration"] = calculate_duration(video)

    if video is not None:
        # Get the width of frames in the video stream
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)

        # Get the height of frames in the video stream
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)

        # Release the video stream
        video.release()

    result["width"] = round(width)
    result["height"] = round(height)

    return result

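A worked example of the conversion above: an absolute (x1, y1, x2, y2) box in a 640x480 frame becomes a relative (x, y, w, h) tuple:

to_relative_box(640, 480, (64, 48, 320, 240))
# -> (0.1, 0.1, 0.4, 0.4)
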
403 frigate/util/services.py Normal file
@@ -0,0 +1,403 @@
"""Utilities for services."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import subprocess as sp
|
||||
import traceback
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
import psutil
|
||||
import py3nvml.py3nvml as nvml
|
||||
|
||||
from frigate.util.builtin import escape_special_characters
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def restart_frigate():
|
||||
proc = psutil.Process(1)
|
||||
# if this is running via s6, sigterm pid 1
|
||||
if proc.name() == "s6-svscan":
|
||||
proc.terminate()
|
||||
# otherwise, just try and exit frigate
|
||||
else:
|
||||
os.kill(os.getpid(), signal.SIGTERM)
|
||||
|
||||
|
||||
def print_stack(sig, frame):
|
||||
traceback.print_stack(frame)
|
||||
|
||||
|
||||
def listen():
|
||||
signal.signal(signal.SIGUSR1, print_stack)
|
||||
|
||||
|
||||
def get_cgroups_version() -> str:
    """Determine what version of cgroups is enabled."""

    cgroup_path = "/sys/fs/cgroup"

    if not os.path.ismount(cgroup_path):
        logger.debug(f"{cgroup_path} is not a mount point.")
        return "unknown"

    try:
        with open("/proc/mounts", "r") as f:
            mounts = f.readlines()

        for mount in mounts:
            mount_info = mount.split()
            if mount_info[1] == cgroup_path:
                fs_type = mount_info[2]
                if fs_type == "cgroup2fs" or fs_type == "cgroup2":
                    return "cgroup2"
                elif fs_type == "tmpfs":
                    return "cgroup"
                else:
                    logger.debug(
                        f"Could not determine cgroups version: unhandled filesystem {fs_type}"
                    )
                break
    except Exception as e:
        logger.debug(f"Could not determine cgroups version: {e}")

    return "unknown"


def get_docker_memlimit_bytes() -> int:
    """Get mem limit in bytes set in docker if present. Returns -1 if no limit detected."""

    # check running a supported cgroups version
    if get_cgroups_version() == "cgroup2":
        memlimit_path = "/sys/fs/cgroup/memory.max"

        try:
            with open(memlimit_path, "r") as f:
                value = f.read().strip()

                if value.isnumeric():
                    return int(value)
                elif value.lower() == "max":
                    return -1
        except Exception as e:
            logger.debug(f"Unable to get docker memlimit: {e}")

    return -1

def get_cpu_stats() -> dict[str, dict]:
    """Get cpu usages for each process id"""
    usages = {}
    docker_memlimit = get_docker_memlimit_bytes() / 1024
    total_mem = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024

    for process in psutil.process_iter(["pid", "name", "cpu_percent", "cmdline"]):
        pid = process.info["pid"]
        try:
            cpu_percent = process.info["cpu_percent"]
            cmdline = process.info["cmdline"]

            with open(f"/proc/{pid}/stat", "r") as f:
                stats = f.readline().split()
            utime = int(stats[13])
            stime = int(stats[14])
            starttime = int(stats[21])

            with open("/proc/uptime") as f:
                system_uptime_sec = int(float(f.read().split()[0]))

            clk_tck = os.sysconf(os.sysconf_names["SC_CLK_TCK"])

            process_utime_sec = utime // clk_tck
            process_stime_sec = stime // clk_tck
            process_starttime_sec = starttime // clk_tck

            process_elapsed_sec = system_uptime_sec - process_starttime_sec
            process_usage_sec = process_utime_sec + process_stime_sec
            cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec

            with open(f"/proc/{pid}/statm", "r") as f:
                mem_stats = f.readline().split()
                mem_res = int(mem_stats[1]) * os.sysconf("SC_PAGE_SIZE") / 1024

            if docker_memlimit > 0:
                mem_pct = round((mem_res / docker_memlimit) * 100, 1)
            else:
                mem_pct = round((mem_res / total_mem) * 100, 1)

            usages[pid] = {
                "cpu": str(cpu_percent),
                "cpu_average": str(round(cpu_average_usage, 2)),
                "mem": f"{mem_pct}",
                "cmdline": " ".join(cmdline),
            }
        except Exception:
            continue

    return usages

def get_physical_interfaces(interfaces) -> list:
    with open("/proc/net/dev", "r") as file:
        lines = file.readlines()

    physical_interfaces = []
    for line in lines:
        if ":" in line:
            interface = line.split(":")[0].strip()
            for int in interfaces:
                if interface.startswith(int):
                    physical_interfaces.append(interface)

    return physical_interfaces


def get_bandwidth_stats(config) -> dict[str, dict]:
    """Get bandwidth usages for each ffmpeg process id"""
    usages = {}
    top_command = ["nethogs", "-t", "-v0", "-c5", "-d1"] + get_physical_interfaces(
        config.telemetry.network_interfaces
    )

    p = sp.run(
        top_command,
        encoding="ascii",
        capture_output=True,
    )

    if p.returncode != 0:
        return usages
    else:
        lines = p.stdout.split("\n")
        for line in lines:
            stats = list(filter(lambda a: a != "", line.strip().split("\t")))
            try:
                if re.search(
                    r"(^ffmpeg|\/go2rtc|frigate\.detector\.[a-z]+)/([0-9]+)/", stats[0]
                ):
                    process = stats[0].split("/")
                    usages[process[len(process) - 2]] = {
                        "bandwidth": round(float(stats[1]) + float(stats[2]), 1),
                    }
            except (IndexError, ValueError):
                continue

    return usages

def get_amd_gpu_stats() -> dict[str, str]:
    """Get stats using radeontop."""
    radeontop_command = ["radeontop", "-d", "-", "-l", "1"]

    p = sp.run(
        radeontop_command,
        encoding="ascii",
        capture_output=True,
    )

    if p.returncode != 0:
        logger.error(f"Unable to poll radeon GPU stats: {p.stderr}")
        return None
    else:
        usages = p.stdout.split(",")
        results: dict[str, str] = {}

        for hw in usages:
            if "gpu" in hw:
                results["gpu"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"
            elif "vram" in hw:
                results["mem"] = f"{hw.strip().split(' ')[1].replace('%', '')}%"

        return results


def get_intel_gpu_stats() -> dict[str, str]:
    """Get stats using intel_gpu_top."""
    intel_gpu_top_command = [
        "timeout",
        "0.5s",
        "intel_gpu_top",
        "-J",
        "-o",
        "-",
        "-s",
        "1",
    ]

    p = sp.run(
        intel_gpu_top_command,
        encoding="ascii",
        capture_output=True,
    )

    # timeout has a non-zero returncode when timeout is reached
    if p.returncode != 124:
        logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
        return None
    else:
        reading = "".join(p.stdout.split())
        results: dict[str, str] = {}

        # render is used for qsv
        render = []
        for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
            packet = json.loads(result[14:])
            single = packet.get("busy", 0.0)
            render.append(float(single))

        if render:
            render_avg = sum(render) / len(render)
        else:
            render_avg = 1

        # video is used for vaapi
        video = []
        for result in re.findall('"Video/\d":{[a-z":\d.,%]+}', reading):
            packet = json.loads(result[10:])
            single = packet.get("busy", 0.0)
            video.append(float(single))

        if video:
            video_avg = sum(video) / len(video)
        else:
            video_avg = 1

        results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
        results["mem"] = "-%"
        return results

def try_get_info(f, h, default="N/A"):
    try:
        v = f(h)
    except nvml.NVMLError_NotSupported:
        v = default
    return v


def get_nvidia_gpu_stats() -> dict[int, dict]:
    results = {}
    try:
        nvml.nvmlInit()
        deviceCount = nvml.nvmlDeviceGetCount()
        for i in range(deviceCount):
            handle = nvml.nvmlDeviceGetHandleByIndex(i)
            meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
            util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
            if util != "N/A":
                gpu_util = util.gpu
            else:
                gpu_util = 0

            if meminfo != "N/A":
                gpu_mem_util = meminfo.used / meminfo.total * 100
            else:
                gpu_mem_util = -1

            results[i] = {
                "name": nvml.nvmlDeviceGetName(handle),
                "gpu": gpu_util,
                "mem": gpu_mem_util,
            }
    except Exception:
        pass
    finally:
        return results

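The try_get_info wrapper above exists because several NVML getters raise NVMLError_NotSupported on boards that lack the counter; a minimal sketch of the same pattern (assuming nvmlInit has already succeeded, as in get_nvidia_gpu_stats):

handle = nvml.nvmlDeviceGetHandleByIndex(0)
# returns "N/A" instead of raising when utilization isn't exposed
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
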
def ffprobe_stream(path: str) -> sp.CompletedProcess:
    """Run ffprobe on stream."""
    clean_path = escape_special_characters(path)
    ffprobe_cmd = [
        "ffprobe",
        "-timeout",
        "1000000",
        "-print_format",
        "json",
        "-show_entries",
        "stream=codec_long_name,width,height,bit_rate,duration,display_aspect_ratio,avg_frame_rate",
        "-loglevel",
        "quiet",
        clean_path,
    ]
    return sp.run(ffprobe_cmd, capture_output=True)


def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
    """Run vainfo."""
    ffprobe_cmd = (
        ["vainfo"]
        if not device_name
        else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
    )
    return sp.run(ffprobe_cmd, capture_output=True)

def get_video_properties(url, get_duration=False):
    def calculate_duration(video: Optional[any]) -> float:
        duration = None

        if video is not None:
            # Get the frames per second (fps) of the video stream
            fps = video.get(cv2.CAP_PROP_FPS)
            total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

            if fps and total_frames:
                duration = total_frames / fps

        # if cv2 failed need to use ffprobe
        if duration is None:
            ffprobe_cmd = [
                "ffprobe",
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                f"{url}",
            ]
            p = sp.run(ffprobe_cmd, capture_output=True)

            if p.returncode == 0 and p.stdout.decode():
                duration = float(p.stdout.decode().strip())
            else:
                duration = -1

        return duration

    width = height = 0

    try:
        # Open the video stream
        video = cv2.VideoCapture(url)

        # Check if the video stream was opened successfully
        if not video.isOpened():
            video = None
    except Exception:
        video = None

    result = {}

    if get_duration:
        result["duration"] = calculate_duration(video)

    if video is not None:
        # Get the width of frames in the video stream
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)

        # Get the height of frames in the video stream
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)

        # Release the video stream
        video.release()

    result["width"] = round(width)
    result["height"] = round(height)

    return result
@@ -11,10 +11,11 @@ import time
from collections import defaultdict

import cv2
import faster_fifo as ff
import numpy as np
from setproctitle import setproctitle

from frigate.config import CameraConfig, DetectConfig
from frigate.config import CameraConfig, DetectConfig, ModelConfig
from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
from frigate.detectors.detector_config import PixelFormatEnum
from frigate.log import LogPipe
@@ -23,8 +24,8 @@ from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection import RemoteObjectDetector
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.util import (
    EventsPerSecond,
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import (
    FrameManager,
    SharedMemoryFrameManager,
    area,
@@ -32,11 +33,11 @@ from frigate.util import (
    draw_box_with_label,
    intersection,
    intersection_over_union,
    listen,
    yuv_region_2_bgr,
    yuv_region_2_rgb,
    yuv_region_2_yuv,
)
from frigate.util.services import listen

logger = logging.getLogger(__name__)

@@ -95,7 +96,17 @@ def filtered(obj, objects_to_track, object_filters):
    return False


def create_tensor_input(frame, model_config, region):
def get_min_region_size(model_config: ModelConfig) -> int:
    """Get the min region size and ensure it is divisible by 4."""
    half = int(max(model_config.height, model_config.width) / 2)

    if half % 4 == 0:
        return half

    return int((half + 3) / 4) * 4


def create_tensor_input(frame, model_config: ModelConfig, region):
    if model_config.input_pixel_format == PixelFormatEnum.rgb:
        cropped_frame = yuv_region_2_rgb(frame, region)
    elif model_config.input_pixel_format == PixelFormatEnum.bgr:
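The rounding in get_min_region_size is plain integer arithmetic; a worked example for a hypothetical 334x334 model:

half = int(max(334, 334) / 2)  # 167, not divisible by 4
int((half + 3) / 4) * 4        # 168: rounded up to the next multiple of 4
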
@@ -195,17 +206,16 @@ def capture_frames(

        frame_rate.update()

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # don't lock the queue to check, just try since it should rarely be full
        try:
            # add to the queue
            frame_queue.put(current_frame.value, False)
            # close the frame
            frame_manager.close(frame_name)

            # add to the queue
            frame_queue.put(current_frame.value)
        except queue.Full:
            # if the queue is full, skip this frame
            skipped_eps.update()
            frame_manager.delete(frame_name)

class CameraWatchdog(threading.Thread):
@@ -468,6 +478,8 @@ def track_camera(
    detection_enabled = process_info["detection_enabled"]
    motion_enabled = process_info["motion_enabled"]
    improve_contrast_enabled = process_info["improve_contrast_enabled"]
    ptz_autotracker_enabled = process_info["ptz_autotracker_enabled"]
    ptz_stopped = process_info["ptz_stopped"]
    motion_threshold = process_info["motion_threshold"]
    motion_contour_area = process_info["motion_contour_area"]

@@ -487,7 +499,7 @@ def track_camera(
        name, labelmap, detection_queue, result_connection, model_config, stop_event
    )

    object_tracker = NorfairTracker(config.detect)
    object_tracker = NorfairTracker(config, ptz_autotracker_enabled, ptz_stopped)

    frame_manager = SharedMemoryFrameManager()

@@ -508,6 +520,7 @@ def track_camera(
        detection_enabled,
        motion_enabled,
        stop_event,
        ptz_stopped,
    )

    logger.info(f"{name}: exiting subprocess")
@@ -717,21 +730,22 @@ def get_consolidated_object_detections(detected_object_groups):

def process_frames(
    camera_name: str,
    frame_queue: mp.Queue,
    frame_queue: ff.Queue,
    frame_shape,
    model_config,
    model_config: ModelConfig,
    detect_config: DetectConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: mp.Queue,
    detected_objects_queue: ff.Queue,
    process_info: dict,
    objects_to_track: list[str],
    object_filters,
    detection_enabled: mp.Value,
    motion_enabled: mp.Value,
    stop_event,
    ptz_stopped: mp.Event,
    exit_on_empty: bool = False,
):
    fps = process_info["process_fps"]
@@ -743,16 +757,18 @@ def process_frames(

    startup_scan_counter = 0

    region_min_size = int(max(model_config.height, model_config.width) / 2)
    region_min_size = get_min_region_size(model_config)

    while not stop_event.is_set():
        if exit_on_empty and frame_queue.empty():
            logger.info("Exiting track_objects...")
            break

        try:
            if exit_on_empty:
                frame_time = frame_queue.get(False)
            else:
                frame_time = frame_queue.get(True, 1)
        except queue.Empty:
            if exit_on_empty:
                logger.info("Exiting track_objects...")
                break
            continue

        current_frame_time.value = frame_time
@@ -766,7 +782,11 @@ def process_frames(
            continue

        # look for motion if enabled
        motion_boxes = motion_detector.detect(frame) if motion_enabled.value else []
        motion_boxes = (
            motion_detector.detect(frame)
            if motion_enabled.value and ptz_stopped.is_set()
            else []
        )

        regions = []
        consolidated_detections = []

@@ -5,7 +5,7 @@ import time
from multiprocessing.synchronize import Event as MpEvent

from frigate.object_detection import ObjectDetectProcess
from frigate.util import restart_frigate
from frigate.util.services import restart_frigate

logger = logging.getLogger(__name__)

@@ -1,9 +1,12 @@
# NVidia TensorRT Support (amd64 only)
nvidia-pyindex; platform_machine == 'x86_64'
nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
cuda-python == 11.7; platform_machine == 'x86_64'
--extra-index-url 'https://pypi.nvidia.com'
numpy < 1.24; platform_machine == 'x86_64'
tensorrt == 8.5.3.*; platform_machine == 'x86_64'
cuda-python == 11.8; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.7.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.*; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.7.*; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu11 == 11.7.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'
@@ -15,6 +15,7 @@ pydantic == 1.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
PyYAML == 6.0
pytz == 2023.3
ruamel.yaml == 0.17.*
tzlocal == 5.0.*
types-PyYAML == 6.0.*
requests == 2.31.*

@@ -23,7 +23,7 @@ export default function CameraControlPanel({ camera = '' }) {
      return;
    }

    sendPtz(`preset-${currentPreset}`);
    sendPtz(`preset_${currentPreset}`);
    setCurrentPreset('');
  };

20 web/src/icons/Score.jsx Normal file
@@ -0,0 +1,20 @@
import { h } from 'preact';
import { memo } from 'preact/compat';

export function Score({ className = 'h-6 w-6', stroke = 'currentColor', fill = 'currentColor', onClick = () => {} }) {
  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      className={className}
      fill={fill}
      viewBox="0 0 24 24"
      stroke={stroke}
      onClick={onClick}
    >
      <title>percent</title>
      <path d="M12,9A3,3 0 0,0 9,12A3,3 0 0,0 12,15A3,3 0 0,0 15,12A3,3 0 0,0 12,9M19,19H15V21H19A2,2 0 0,0 21,19V15H19M19,3H15V5H19V9H21V5A2,2 0 0,0 19,3M5,5H9V3H5A2,2 0 0,0 3,5V9H5M5,15H3V19A2,2 0 0,0 5,21H9V19H5V15Z" />
    </svg>
  );
}

export default memo(Score);
@@ -6,7 +6,7 @@ import Heading from '../components/Heading';
import WebRtcPlayer from '../components/WebRtcPlayer';
import '../components/MsePlayer';
import useSWR from 'swr';
import { useMemo } from 'preact/hooks';
import { useMemo, useState } from 'preact/hooks';
import CameraControlPanel from '../components/CameraControlPanel';
import { baseUrl } from '../api/baseUrl';

@@ -26,16 +26,19 @@ export default function Birdseye() {
      .map(([_, camera]) => camera.name);
  }, [config]);

  const [isMaxWidth, setIsMaxWidth] = useState(false);

  if (!config || !sourceIsLoaded) {
    return <ActivityIndicator />;
  }

  let player;
  const playerClass = isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full';
  if (viewSource == 'mse' && config.birdseye.restream) {
    if ('MediaSource' in window) {
      player = (
        <Fragment>
          <div className={ptzCameras.length ? 'max-w-5xl xl:w-1/2' : 'max-w-5xl'}>
          <div className={ptzCameras.length && !isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full'}>
            <video-stream
              mode="mse"
              src={new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=birdseye`)}
@@ -52,10 +55,10 @@ export default function Birdseye() {
        </Fragment>
      );
    }
  } else if (viewSource == 'webrtc' && config.birdseye.restream) {
  } else if (viewSource == 'webrtc' ) {
    player = (
      <Fragment>
        <div className={ptzCameras.length ? 'max-w-5xl xl:w-1/2' : 'max-w-5xl'}>
        <div className={ptzCameras.length && config.birdseye.restream && !isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full'}>
          <WebRtcPlayer camera="birdseye" />
        </div>
      </Fragment>
@@ -63,7 +66,7 @@ export default function Birdseye() {
  } else {
    player = (
      <Fragment>
        <div className={ptzCameras.length ? 'max-w-5xl xl:w-1/2' : 'max-w-5xl'}>
        <div className={ ptzCameras.length && config.birdseye.restream && !isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full' }>
          <JSMpegPlayer camera="birdseye" />
        </div>
      </Fragment>
@@ -77,26 +80,37 @@ export default function Birdseye() {
        Birdseye
      </Heading>

      <button
        className="bg-gray-500 hover:bg-gray-700 text-white font-bold py-2 px-4 rounded hidden md:inline"
        onClick={() => setIsMaxWidth(!isMaxWidth)}
      >
        Toggle width
      </button>

      {config.birdseye.restream && (
        <select
          className="basis-1/8 cursor-pointer rounded dark:bg-slate-800"
          value={viewSource}
          onChange={(e) => setViewSource(e.target.value)}
          key="width-changer"
        >
          {sourceValues.map((item) => (
            <option key={item} value={item}>
              {item}
            </option>
          ))}

        </select>
      )}
    </div>

    <div className="xl:flex justify-between">
      <div className={playerClass}> {/* Use dynamic class */}
        {player}
      </div>

      {ptzCameras.length ? (
        <div className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow p-4 w-full sm:w-min xl:h-min xl:w-1/2">
        <div className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow p-4 sm:w-min xl:h-min {playerClass}">
          <Heading size="sm">Control Panel</Heading>
          {ptzCameras.map((camera) => (
            <div className="p-4" key={camera}>

@@ -7,7 +7,7 @@ import { useResizeObserver } from '../hooks';
import { useCallback, useMemo, useRef, useState } from 'preact/hooks';
import { useApiHost } from '../api';
import useSWR from 'swr';

import axios from 'axios';
export default function CameraMasks({ camera }) {
  const { data: config } = useSWR('config');
  const apiHost = useApiHost();
@@ -95,12 +95,53 @@ export default function CameraMasks({ camera }) {
    [motionMaskPoints, setMotionMaskPoints]
  );

  const handleCopyMotionMasks = useCallback(async () => {
    await window.navigator.clipboard.writeText(` motion:
  const handleCopyMotionMasks = useCallback(() => {
    const textToCopy = ` motion:
    mask:
${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`);
${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`;

    if (window.navigator.clipboard && window.navigator.clipboard.writeText) {
      // Use Clipboard API if available
      window.navigator.clipboard.writeText(textToCopy).catch((err) => {
        throw new Error('Failed to copy text: ', err);
      });
    } else {
      // Fallback to document.execCommand('copy')
      const textarea = document.createElement('textarea');
      textarea.value = textToCopy;
      document.body.appendChild(textarea);
      textarea.select();

      try {
        const successful = document.execCommand('copy');
        if (!successful) {
          throw new Error('Failed to copy text');
        }
      } catch (err) {
        throw new Error('Failed to copy text: ', err);
      }

      document.body.removeChild(textarea);
    }
  }, [motionMaskPoints]);

  const handleSaveMotionMasks = useCallback(async () => {
    try {
      const queryParameters = motionMaskPoints
        .map((mask, index) => `cameras.${camera}.motion.mask.${index}=${polylinePointsToPolyline(mask)}`)
        .join('&');
      const endpoint = `config/set?${queryParameters}`;
      const response = await axios.put(endpoint);
      if (response.status === 200) {
        // handle successful response
      }
    } catch (error) {
      // handle error
      //console.error(error);
    }
  }, [camera, motionMaskPoints]);

  // Zone methods
  const handleEditZone = useCallback(
    (key) => {
@@ -127,15 +168,53 @@ ${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).jo
  );

  const handleCopyZones = useCallback(async () => {
    await window.navigator.clipboard.writeText(` zones:
    const textToCopy = ` zones:
${Object.keys(zonePoints)
  .map(
    (zoneName) => ` ${zoneName}:
      coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}`
  )
  .join('\n')}`);
      coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}`).join('\n')}`;

    if (window.navigator.clipboard && window.navigator.clipboard.writeText) {
      // Use Clipboard API if available
      window.navigator.clipboard.writeText(textToCopy).catch((err) => {
        throw new Error('Failed to copy text: ', err);
      });
    } else {
      // Fallback to document.execCommand('copy')
      const textarea = document.createElement('textarea');
      textarea.value = textToCopy;
      document.body.appendChild(textarea);
      textarea.select();

      try {
        const successful = document.execCommand('copy');
        if (!successful) {
          throw new Error('Failed to copy text');
        }
      } catch (err) {
        throw new Error('Failed to copy text: ', err);
      }

      document.body.removeChild(textarea);
    }
  }, [zonePoints]);

  const handleSaveZones = useCallback(async () => {
    try {
      const queryParameters = Object.keys(zonePoints)
        .map((zoneName) => `cameras.${camera}.zones.${zoneName}.coordinates=${polylinePointsToPolyline(zonePoints[zoneName])}`)
        .join('&');
      const endpoint = `config/set?${queryParameters}`;
      const response = await axios.put(endpoint);
      if (response.status === 200) {
        // handle successful response
      }
    } catch (error) {
      // handle error
      //console.error(error);
    }
  }, [camera, zonePoints]);

  // Object methods
  const handleEditObjectMask = useCallback(
    (key, subkey) => {
@@ -175,6 +254,23 @@ ${Object.keys(objectMaskPoints)
  .join('\n')}`);
  }, [objectMaskPoints]);

  const handleSaveObjectMasks = useCallback(async () => {
    try {
      const queryParameters = Object.keys(objectMaskPoints)
        .filter((objectName) => objectMaskPoints[objectName].length > 0)
        .map((objectName, index) => `cameras.${camera}.objects.filters.${objectName}.mask.${index}=${polylinePointsToPolyline(objectMaskPoints[objectName])}`)
        .join('&');
      const endpoint = `config/set?${queryParameters}`;
      const response = await axios.put(endpoint);
      if (response.status === 200) {
        // handle successful response
      }
    } catch (error) {
      // handle error
      //console.error(error);
    }
  }, [camera, objectMaskPoints]);

  const handleAddToObjectMask = useCallback(
    (key) => {
      const newObjectMaskPoints = { ...objectMaskPoints, [key]: [...objectMaskPoints[key], []] };
@@ -246,6 +342,7 @@ ${Object.keys(objectMaskPoints)
          editing={editing}
          title="Motion masks"
          onCopy={handleCopyMotionMasks}
          onSave={handleSaveMotionMasks}
          onCreate={handleAddMask}
          onEdit={handleEditMask}
          onRemove={handleRemoveMask}
@@ -258,6 +355,7 @@ ${Object.keys(objectMaskPoints)
          editing={editing}
          title="Zones"
          onCopy={handleCopyZones}
          onSave={handleSaveZones}
          onCreate={handleAddZone}
          onEdit={handleEditZone}
          onRemove={handleRemoveZone}
@@ -272,6 +370,7 @@ ${Object.keys(objectMaskPoints)
          title="Object masks"
          onAdd={handleAddToObjectMask}
          onCopy={handleCopyObjectMasks}
          onSave={handleSaveObjectMasks}
          onCreate={handleAddObjectMask}
          onEdit={handleEditObjectMask}
          onRemove={handleRemoveObjectMask}
@@ -407,6 +506,7 @@ function MaskValues({
  title,
  onAdd,
  onCopy,
  onSave,
  onCreate,
  onEdit,
  onRemove,
@@ -455,6 +555,8 @@ function MaskValues({
    [onAdd]
  );

  return (
    <div className="overflow-hidden" onMouseOver={handleMousein} onMouseOut={handleMouseout}>
      <div className="flex space-x-4">
@@ -463,6 +565,7 @@ function MaskValues({
        </Heading>
        <Button onClick={onCopy}>Copy</Button>
        <Button onClick={onCreate}>Add</Button>
        <Button onClick={onSave}>Save</Button>
      </div>
      <pre className="relative overflow-auto font-mono text-gray-900 dark:text-gray-100 rounded bg-gray-100 dark:bg-gray-800 p-2">
        {yamlPrefix}

@@ -30,6 +30,7 @@ import TimeAgo from '../components/TimeAgo';
import Timepicker from '../components/TimePicker';
import TimelineSummary from '../components/TimelineSummary';
import TimelineEventOverlay from '../components/TimelineEventOverlay';
import { Score } from '../icons/Score';
import UserViewer from '../components/UserViewer';

const API_LIMIT = 25;
@@ -603,13 +604,10 @@ export default function Events({ path, ...props }) {
              <div className="m-2 flex grow">
                <div className="flex flex-col grow">
                  <div className="capitalize text-lg font-bold">
                    {event.sub_label
                      ? `${event.label.replaceAll('_', ' ')}: ${event.sub_label.replaceAll('_', ' ')}`
                      : event.label.replaceAll('_', ' ')}
                    {(event?.data?.top_score || event.top_score || 0) == 0
                      ? null
                      : ` (${((event?.data?.top_score || event.top_score) * 100).toFixed(0)}%)`}
                    {event.label.replaceAll('_', ' ')}
                    {event.sub_label ? `: ${event.sub_label.replaceAll('_', ' ')}` : null}
                  </div>

                  <div className="text-sm flex">
                    <Clock className="h-5 w-5 mr-2 inline" />
                    {formatUnixTimestampToDateTime(event.start_time, { ...config.ui })}
@@ -629,6 +627,15 @@ export default function Events({ path, ...props }) {
                    <Zone className="w-5 h-5 mr-2 inline" />
                    {event.zones.join(', ').replaceAll('_', ' ')}
                  </div>
                  <div className="capitalize text-sm flex align-center">
                    <Score className="w-5 h-5 mr-2 inline" />
                    {(event?.data?.top_score || event.top_score || 0) == 0
                      ? null
                      : `Label: ${((event?.data?.top_score || event.top_score) * 100).toFixed(0)}%`}
                    {(event?.data?.sub_label_score || 0) == 0
                      ? null
                      : `, Sub Label: ${(event?.data?.sub_label_score * 100).toFixed(0)}%`}
                  </div>
                </div>
                <div class="hidden sm:flex flex-col justify-end mr-2">
                  {event.end_time && event.has_snapshot && (
