Commit 08167e8cf7: Merge branch 'dev' of github.com:blakeblackshear/frigate into measure-and-publish-dbfs
Dockerfile (24 changed lines)
@@ -30,7 +30,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
 FROM wget AS go2rtc

 ARG TARGETARCH

 WORKDIR /rootfs/usr/local/go2rtc/bin
-RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.5.0/go2rtc_linux_${TARGETARCH}" \
+RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.6.0/go2rtc_linux_${TARGETARCH}" \
     && chmod +x go2rtc
@@ -262,15 +262,35 @@ FROM deps AS frigate
WORKDIR /opt/frigate/
COPY --from=rootfs / /

# Build TensorRT-specific library
FROM nvcr.io/nvidia/tensorrt:23.03-py3 AS trt-deps

RUN --mount=type=bind,source=docker/support/tensorrt_detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
    /tensorrt_libyolo.sh

# Frigate w/ TensorRT Support as separate image
FROM frigate AS frigate-tensorrt

# Disable S6 Global timeout
ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

ENV TRT_VER=8.5.3
ENV YOLO_MODELS="yolov7-tiny-416"

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/support/tensorrt_detector/rootfs/ /

RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    pip3 install -U /deps/trt-wheels/*.whl && \
    ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \
    ldconfig

# Dev Container w/ TRT
FROM devcontainer AS devcontainer-trt

COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY docker/support/tensorrt_detector/rootfs/ /
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
    pip3 install -U /deps/trt-wheels/*.whl
@@ -2,10 +2,10 @@

 set -euxo pipefail

-NGINX_VERSION="1.22.1"
-VOD_MODULE_VERSION="1.30"
-SECURE_TOKEN_MODULE_VERSION="1.4"
-RTMP_MODULE_VERSION="1.2.1"
+NGINX_VERSION="1.25.1"
+VOD_MODULE_VERSION="1.31"
+SECURE_TOKEN_MODULE_VERSION="1.5"
+RTMP_MODULE_VERSION="1.2.2"

 cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
 sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
@@ -68,7 +68,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
     libva-drm2 mesa-va-drivers
 fi

-apt-get purge gnupg apt-transport-https wget xz-utils -y
+apt-get purge gnupg apt-transport-https xz-utils -y
 apt-get clean autoclean -y
 apt-get autoremove --purge -y
 rm -rf /var/lib/apt/lists/*
@@ -1,3 +1,4 @@
/usr/local/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib
/usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib
@@ -0,0 +1,53 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Generate models for the TensorRT detector

set -o errexit -o nounset -o pipefail

MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"

# Create output folder
mkdir -p ${OUTPUT_FOLDER}

FIRST_MODEL=true
MODEL_CONVERT=""

for model in ${YOLO_MODELS//,/ }
do
    # Remove old link in case path/version changed
    rm -f ${MODEL_CACHE_DIR}/${model}.trt

    if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then
        if [[ ${FIRST_MODEL} = true ]]; then
            MODEL_CONVERT="${model}"
            FIRST_MODEL=false;
        else
            MODEL_CONVERT+=",${model}";
        fi
    else
        ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
    fi
done

if [[ -z ${MODEL_CONVERT} ]]; then
    echo "No models to convert."
    exit 0
fi

echo "Generating the following TRT Models: ${MODEL_CONVERT}"

# Build trt engine
cd /usr/local/src/tensorrt_demos/yolo

# Download yolo weights
./download_yolo.sh $MODEL_CONVERT > /dev/null

for model in ${MODEL_CONVERT//,/ }
do
    echo "Converting ${model} model"
    python3 yolo_to_onnx.py -m ${model} > /dev/null
    python3 onnx_to_tensorrt.py -m ${model} > /dev/null
    cp ${model}.trt ${OUTPUT_FOLDER}/${model}.trt
    ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt
done
@@ -0,0 +1 @@
oneshot

@@ -0,0 +1 @@
/etc/s6-overlay/s6-rc.d/trt-model-prepare/run
docker/support/tensorrt_detector/tensorrt_libyolo.sh (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash

set -euxo pipefail

SCRIPT_DIR="/usr/local/src/tensorrt_demos"

# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download

# Build libyolo
cd ./tensorrt_demos/plugins && make all
cp libyolo_layer.so /usr/local/lib/libyolo_layer.so

# Store yolo scripts for later conversion
cd ../
mkdir -p ${SCRIPT_DIR}/plugins
cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so
cp -a yolo ${SCRIPT_DIR}/
@@ -1,34 +0,0 @@ (file deleted)
#!/bin/bash

set -euxo pipefail

CUDA_HOME=/usr/local/cuda
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
OUTPUT_FOLDER=/tensorrt_models
echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"

# Create output folder
mkdir -p ${OUTPUT_FOLDER}

# Install packages
pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3

# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos

# Build libyolo
cd /tensorrt_demos/plugins && make all
cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so

# Download yolo weights
cd /tensorrt_demos/yolo && ./download_yolo.sh

# Build trt engine
cd /tensorrt_demos/yolo

for model in ${YOLO_MODELS//,/ }
do
    python3 yolo_to_onnx.py -m ${model}
    python3 onnx_to_tensorrt.py -m ${model}
    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
done
@@ -120,7 +120,7 @@ NOTE: The folder that is mapped from the host needs to be the folder that contai

 ## Custom go2rtc version

-Frigate currently includes go2rtc v1.5.0, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.6.0, there may be certain cases where you want to run a different version of go2rtc.

 To do this:
docs/docs/configuration/autotracking.md (new file, 77 lines)
@@ -0,0 +1,77 @@
---
id: autotracking
title: Autotracking
---

An ONVIF-capable PTZ (pan-tilt-zoom) camera that supports relative movement within the field of view (FOV) can be configured to automatically track moving objects and keep them in the center of the frame.

## Autotracking behavior

Once Frigate determines that an object is not a false positive and has entered one of the required zones, the autotracker will move the PTZ camera to keep the object centered in the frame until the object either moves out of the frame, the PTZ reaches the limit of its movement range, or Frigate loses track of it.

Upon loss of tracking, Frigate will scan the region of the lost object for `timeout` seconds. If an object of the same type is found in that region, Frigate will autotrack that new object.

When tracking has ended, Frigate will return the camera to the preset specified by the `return_preset` configuration entry.

## Checking ONVIF camera support

Frigate autotracking works with PTZ cameras capable of relative movement within the field of view (specified in the [ONVIF spec](https://www.onvif.org/specs/srv/ptz/ONVIF-PTZ-Service-Spec-v1712.pdf) as `RelativePanTiltTranslationSpace` having a `TranslationSpaceFov` entry).

Many cheaper or older PTZs may not support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported.

Alternatively, you can download and run [this simple Python script](https://gist.github.com/hawkeye217/152a1d4ba80760dac95d46e143d37112), replacing the details on line 4 with your camera's IP address, ONVIF port, username, and password to check your camera.
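For reference, the check that script performs can be sketched with the `onvif-zeep` package. Treat this as a hedged illustration: the profile selection and the exact attribute traversal are assumptions based on the ONVIF spec above, and the linked gist remains the recommended way to test.

```python
# Hedged sketch of an ONVIF capability check with onvif-zeep; the host,
# port, and credentials below are placeholders for your camera.
from onvif import ONVIFCamera

cam = ONVIFCamera("192.168.1.100", 8000, "admin", "password")
media = cam.create_media_service()
ptz = cam.create_ptz_service()

profile = media.GetProfiles()[0]
opts = ptz.GetConfigurationOptions(
    {"ConfigurationToken": profile.PTZConfiguration.token}
)

# Autotracking requires a RelativePanTiltTranslationSpace whose URI is the
# TranslationSpaceFov space from the ONVIF PTZ spec.
spaces = opts.Spaces.RelativePanTiltTranslationSpace or []
print(any("TranslationSpaceFov" in space.URI for space in spaces))
```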
## Configuration

First, set up a PTZ preset in your camera's firmware and give it a name.

Edit your Frigate configuration file and enter the ONVIF parameters for your camera. Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset.

An [ONVIF connection](cameras.md) is required for autotracking to function.

Note that `autotracking` is disabled by default but can be enabled in the configuration or via MQTT.

```yaml
cameras:
  ptzcamera:
    ...
    onvif:
      # Required: host of the camera being connected to.
      host: 0.0.0.0
      # Optional: ONVIF port for device (default: shown below).
      port: 8000
      # Optional: username for login.
      # NOTE: Some devices require admin to access ONVIF.
      user: admin
      # Optional: password for login.
      password: admin
      # Optional: PTZ camera object autotracking. Keeps a moving object in
      # the center of the frame by automatically moving the PTZ camera.
      autotracking:
        # Optional: enable/disable object autotracking. (default: shown below)
        enabled: False
        # Optional: list of objects to track from labelmap.txt (default: shown below)
        track:
          - person
        # Required: Begin automatically tracking an object when it enters any of the listed zones.
        required_zones:
          - zone_name
        # Required: Name of ONVIF camera preset to return to when tracking is over. (default: shown below)
        return_preset: home
        # Optional: Seconds to delay before returning to preset. (default: shown below)
        timeout: 10
```

## Best practices and considerations

Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR.

The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases (especially for faster moving objects), the default 5 fps is insufficient for the motion estimator to perform accurately. 10 fps is the current recommendation. Higher frame rates will likely not be more performant and will only slow down Frigate and the motion estimator. Adjust your camera to output at least 10 frames per second and change the `fps` parameter in the [detect configuration](index.md) of your configuration file.

A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well, or won't work at all. If Frigate already has trouble keeping track of your object, the autotracker will struggle as well.

The autotracker adds PTZ motion requests to a queue while the motor is moving. Once the motor stops, the queued requests are executed together as one large move rather than as incremental moves. If your PTZ's motor is slow, you may not be able to reliably autotrack fast moving objects.

## Usage applications

In security and surveillance, it's common to use "spotter" cameras in combination with your PTZ. When your fixed spotter camera detects an object, you could use an automation platform like Home Assistant to move the PTZ to a specific preset so that Frigate can begin automatically tracking the object. For example: a residence may have fixed cameras on the east and west side of the property, capturing views up and down a street. When the spotter camera on the west side detects a person, a Home Assistant automation could move the PTZ to a camera preset aimed toward the west. When the object enters the specified zone, Frigate's autotracker could then continue to track the person as it moves out of view of any of the fixed cameras.
@@ -141,7 +141,7 @@ go2rtc:
       - rtspx://192.168.1.1:7441/abcdefghijk
 ```

-[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-rtsp)
+[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#source-rtsp)

 In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with Unifi Protect.
@@ -66,3 +66,5 @@ cameras:
 ```

 then PTZ controls will be available in the camera's WebUI.
+
+An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs.
@@ -145,6 +145,12 @@ audio:
   enabled: False
   # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below)
   max_not_heard: 30
+  # Optional: Configure the min rms volume required to run audio detection (default: shown below)
+  # As a rule of thumb:
+  #   - 200 - high sensitivity
+  #   - 500 - medium sensitivity
+  #   - 1000 - low sensitivity
+  min_volume: 500
   # Optional: Types of audio to listen for (default: shown below)
   listen:
     - bark
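To pick a sensible `min_volume` for your environment, you can measure the RMS of a short audio capture from the camera. A hedged Python sketch (the WAV file path and the 16-bit mono format are assumptions on my part):

```python
# Hedged helper for choosing min_volume: measure the RMS of a short WAV
# capture; assumes 16-bit PCM samples.
import wave

import numpy as np

with wave.open("sample_from_camera.wav", "rb") as w:
    frames = w.readframes(w.getnframes())

samples = np.frombuffer(frames, dtype=np.int16).astype(np.float32)
rms = float(np.sqrt(np.mean(samples**2)))
print(f"measured rms: {rms:.0f} (compare against min_volume, e.g. 500)")
```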
@@ -412,7 +418,7 @@ rtmp:
   enabled: False

 # Optional: Restream configuration
-# Uses https://github.com/AlexxIT/go2rtc (v1.5.0)
+# Uses https://github.com/AlexxIT/go2rtc (v1.6.0)
 go2rtc:

 # Optional: jsmpeg stream configuration for WebUI
@@ -555,6 +561,21 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
+      # Optional: PTZ camera object autotracking. Keeps a moving object in
+      # the center of the frame by automatically moving the PTZ camera.
+      autotracking:
+        # Optional: enable/disable object autotracking. (default: shown below)
+        enabled: False
+        # Optional: list of objects to track from labelmap.txt (default: shown below)
+        track:
+          - person
+        # Required: Begin automatically tracking an object when it enters any of the listed zones.
+        required_zones:
+          - zone_name
+        # Required: Name of ONVIF camera preset to return to when tracking is over.
+        return_preset: preset_name
+        # Optional: Seconds to delay before returning to preset. (default: shown below)
+        timeout: 10

 # Optional: Configuration for how to sort the cameras in the Birdseye view.
 birdseye:
@@ -115,4 +115,4 @@ services:

 :::

-See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#module-webrtc) for more information about this.
+See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#module-webrtc) for more information about this.
@@ -174,9 +174,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t

 ### Minimum Hardware Support

-The TensorRT detector uses the 11.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below.
-
-> **TODO:** NVidia claims support on compute 3.5 and 3.7, but marks it as deprecated. This would have some, but not all, Kepler GPUs as possibly working. This needs testing before making any claims of support.
+The TensorRT detector uses the 12.x series of CUDA libraries, which have minor version compatibility. The minimum driver version on the host system must be `>=525.60.13`. The GPU must also support a Compute Capability of `5.0` or greater, which generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below.

 To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
@@ -192,22 +190,15 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben

 ### Generate Models

-The model used for TensorRT must be preprocessed on the same hardware platform that it will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is provided that will build several common models.
+The model used for TensorRT must be preprocessed on the same hardware platform that it will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models.

-To generate model files, create a new folder to save the models, download the script, and launch a docker container that will run the script.
-
-```bash
-mkdir trt-models
-wget https://github.com/blakeblackshear/frigate/raw/master/docker/tensorrt_models.sh
-chmod +x tensorrt_models.sh
-docker run --gpus=all --rm -it -v `pwd`/trt-models:/tensorrt_models -v `pwd`/tensorrt_models.sh:/tensorrt_models.sh nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh
-```
-
-The `trt-models` folder can then be mapped into your Frigate container as `trt-models` and the models referenced from the config.
+The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is already mapped to a directory on the host, and `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.

-If your GPU does not support FP16 operations, you can pass the environment variable `-e USE_FP16=False` to the `docker run` command to disable it.
+By default, the `yolov7-tiny-416` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
+
+If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it.

-Specific models can be selected by passing an environment variable to the `docker run` command. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
+Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
 ```
 yolov3-288

@@ -237,11 +228,20 @@ yolov7x-640
 yolov7x-320
 ```

+An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this:
+
+```yml
+frigate:
+  environment:
+    - YOLO_MODELS="yolov4-608,yolov7x-640"
+    - USE_FP16=false
+```
 ### Configuration Parameters

 The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.

-The TensorRT detector uses `.trt` model files that are located in `/trt-models/` by default. The model file path and dimensions used will depend on which model you have generated.
+The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.

 ```yaml
 detectors:
@@ -250,7 +250,7 @@ detectors:
     device: 0 # This is the default, select the first GPU

 model:
-  path: /trt-models/yolov7-tiny-416.trt
+  path: /config/model_cache/tensorrt/yolov7-tiny-416.trt
   input_tensor: nchw
   input_pixel_format: rgb
   width: 416
@@ -7,7 +7,7 @@ title: Restream

 Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.5.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#configuration) for more advanced configurations and features.
+Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.6.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#configuration) for more advanced configurations and features.

 :::note
@@ -134,7 +134,7 @@ cameras:

 ## Advanced Restream Configurations

-The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

 NOTE: The output will need to be passed with two curly braces `{{output}}`
@@ -72,7 +72,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known

 ### TensorRT

-The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).
+The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector).

 Inference speeds will vary greatly depending on the GPU and the model used.
 `tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
@@ -10,7 +10,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect

 # Setup a go2rtc stream

-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#module-streams), not just rtsp.

 ```yaml
 go2rtc:
@@ -23,7 +23,7 @@ The easiest live view to get working is MSE. After adding this to the config, re

 ### What if my video doesn't play?

-If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.5.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
+If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.6.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:

 ```yaml
 go2rtc:
@@ -63,7 +63,9 @@ Message published for each changed event. The first message is published when th
     "stationary": false, // whether or not the object is considered stationary
     "motionless_count": 0, // number of frames the object has been motionless
     "position_changes": 2, // number of times the object has moved from a stationary position
-    "attributes": [], // set of unique attributes that have been identified on the object
+    "attributes": {
+      "face": 0.64
+    }, // attributes with top score that have been identified on the object at any point
     "current_attributes": [] // detailed data about the current attributes in this frame
   },
   "after": {
@@ -90,13 +92,15 @@ Message published for each changed event. The first message is published when th
     "stationary": false, // whether or not the object is considered stationary
     "motionless_count": 0, // number of frames the object has been motionless
     "position_changes": 2, // number of times the object has changed position
-    "attributes": ["face"], // set of unique attributes that have been identified on the object
+    "attributes": {
+      "face": 0.86
+    }, // attributes with top score that have been identified on the object at any point
     "current_attributes": [
       // detailed data about the current attributes in this frame
       {
        "label": "face",
        "box": [442, 506, 534, 524],
-       "score": 0.64
+       "score": 0.86
      }
    ]
  }
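Because `attributes` is now an object mapping each label to its best score, consumers that previously iterated a list need a small change. A hedged sketch of extracting the top attribute from an events payload (payload shape as documented above; the surrounding MQTT subscription wiring is up to you):

```python
# Minimal sketch: pick the top-scoring attribute from a frigate/events
# payload now that "attributes" is a {label: best_score} map.
import json
from typing import Optional, Tuple

def top_attribute(payload: bytes) -> Optional[Tuple[str, float]]:
    event = json.loads(payload)
    attributes = event["after"].get("attributes", {})
    if not attributes:
        return None
    label = max(attributes, key=attributes.get)
    return label, attributes[label]

# Example with the documented payload shape:
payload = b'{"after": {"attributes": {"face": 0.86}}}'
print(top_attribute(payload))  # ('face', 0.86)
```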
@@ -188,3 +192,11 @@ Topic to send PTZ commands to camera.

 | `MOVE_<dir>` | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT] |
 | `ZOOM_<dir>` | send command to continuously zoom `<dir>`, possible values are [IN, OUT] |
 | `STOP`       | send command to stop moving |

+### `frigate/<camera_name>/ptz_autotracker/set`
+
+Topic to turn the PTZ autotracker for a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/ptz_autotracker/state`
+
+Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.
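A minimal sketch of driving the new topic from Python with paho-mqtt (broker hostname and camera name are placeholders):

```python
# Toggle the PTZ autotracker over MQTT using paho-mqtt's one-shot helper.
import paho.mqtt.publish as publish

publish.single(
    "frigate/ptzcamera/ptz_autotracker/set",  # topic documented above
    payload="ON",
    hostname="mqtt.local",  # hypothetical broker
)
```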
@@ -27,6 +27,7 @@ from frigate.const import (
     CLIPS_DIR,
     CONFIG_DIR,
     DEFAULT_DB_PATH,
+    DEFAULT_QUEUE_BUFFER_SIZE,
     EXPORT_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
@@ -42,13 +43,13 @@ from frigate.object_detection import ObjectDetectProcess
 from frigate.object_processing import TrackedObjectProcessor
 from frigate.output import output_frames
 from frigate.plus import PlusApi
-from frigate.ptz import OnvifController
+from frigate.ptz.autotrack import PtzAutoTrackerThread
+from frigate.ptz.onvif import OnvifController
 from frigate.record.record import manage_recordings
 from frigate.stats import StatsEmitter, stats_init
 from frigate.storage import StorageMaintainer
 from frigate.timeline import TimelineProcessor
-from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
-from frigate.util.builtin import LimitedQueue as LQueue
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes
 from frigate.version import VERSION
 from frigate.video import capture_camera, track_camera
 from frigate.watchdog import FrigateWatchdog
@@ -67,6 +68,7 @@ class FrigateApp:
         self.plus_api = PlusApi()
         self.camera_metrics: dict[str, CameraMetricsTypes] = {}
         self.feature_metrics: dict[str, FeatureMetricsTypes] = {}
+        self.ptz_metrics: dict[str, PTZMetricsTypes] = {}
         self.processes: dict[str, int] = {}

     def set_environment_vars(self) -> None:
@@ -159,10 +161,27 @@ class FrigateApp:
             "ffmpeg_pid": mp.Value("i", 0),  # type: ignore[typeddict-item]
             # issue https://github.com/python/typeshed/issues/8799
             # from mypy 0.981 onwards
-            "frame_queue": LQueue(maxsize=2),
+            "frame_queue": mp.Queue(maxsize=2),
             "capture_process": None,
             "process": None,
         }
+        self.ptz_metrics[camera_name] = {
+            "ptz_autotracker_enabled": mp.Value(  # type: ignore[typeddict-item]
+                # issue https://github.com/python/typeshed/issues/8799
+                # from mypy 0.981 onwards
+                "i",
+                self.config.cameras[camera_name].onvif.autotracking.enabled,
+            ),
+            "ptz_stopped": mp.Event(),
+            "ptz_reset": mp.Event(),
+            "ptz_start_time": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+            # issue https://github.com/python/typeshed/issues/8799
+            # from mypy 0.981 onwards
+            "ptz_stop_time": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+            # issue https://github.com/python/typeshed/issues/8799
+            # from mypy 0.981 onwards
+        }
+        self.ptz_metrics[camera_name]["ptz_stopped"].set()
         self.feature_metrics[camera_name] = {
             "audio_enabled": mp.Value(  # type: ignore[typeddict-item]
                 # issue https://github.com/python/typeshed/issues/8799
@@ -191,22 +210,22 @@ class FrigateApp:

     def init_queues(self) -> None:
         # Queues for clip processing
-        self.event_queue: Queue = ff.Queue()
-        self.event_processed_queue: Queue = ff.Queue()
-        self.video_output_queue: Queue = LQueue(
+        self.event_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
+        self.event_processed_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)
+        self.video_output_queue: Queue = mp.Queue(
             maxsize=len(self.config.cameras.keys()) * 2
         )

         # Queue for cameras to push tracked objects to
-        self.detected_frames_queue: Queue = LQueue(
+        self.detected_frames_queue: Queue = mp.Queue(
             maxsize=len(self.config.cameras.keys()) * 2
         )

         # Queue for recordings info
-        self.recordings_info_queue: Queue = ff.Queue()
+        self.recordings_info_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)

         # Queue for timeline events
-        self.timeline_queue: Queue = ff.Queue()
+        self.timeline_queue: Queue = ff.Queue(DEFAULT_QUEUE_BUFFER_SIZE)

         # Queue for inter process communication
         self.inter_process_queue: Queue = ff.Queue()
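`ff` here appears to be the faster-fifo library; unlike `multiprocessing.Queue`, its constructor takes a maximum buffer size in bytes, which is what `DEFAULT_QUEUE_BUFFER_SIZE` (added to `frigate/const.py` below as 2 MB) bounds. A small sketch of that assumption:

```python
# Sketch of the faster-fifo queue semantics assumed above: the argument
# bounds the underlying shared-memory buffer in bytes, not a message count.
from faster_fifo import Queue

DEFAULT_QUEUE_BUFFER_SIZE = 2000 * 1000  # 2MB, matching frigate.const

q = Queue(DEFAULT_QUEUE_BUFFER_SIZE)
q.put({"camera": "ptzcamera", "frame_time": 0.0})
print(q.get(timeout=1.0))
```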
@@ -317,7 +336,7 @@ class FrigateApp:
         )

     def init_onvif(self) -> None:
-        self.onvif_controller = OnvifController(self.config)
+        self.onvif_controller = OnvifController(self.config, self.ptz_metrics)

     def init_dispatcher(self) -> None:
         comms: list[Communicator] = []
@@ -333,6 +352,7 @@ class FrigateApp:
             self.onvif_controller,
             self.camera_metrics,
             self.feature_metrics,
+            self.ptz_metrics,
             comms,
         )
@@ -373,6 +393,15 @@ class FrigateApp:
             detector_config,
         )

+    def start_ptz_autotracker(self) -> None:
+        self.ptz_autotracker_thread = PtzAutoTrackerThread(
+            self.config,
+            self.onvif_controller,
+            self.ptz_metrics,
+            self.stop_event,
+        )
+        self.ptz_autotracker_thread.start()
+
     def start_detected_frames_processor(self) -> None:
         self.detected_frames_processor = TrackedObjectProcessor(
             self.config,
@@ -382,6 +411,7 @@ class FrigateApp:
             self.event_processed_queue,
             self.video_output_queue,
             self.recordings_info_queue,
+            self.ptz_autotracker_thread,
             self.stop_event,
         )
         self.detected_frames_processor.start()
@@ -418,6 +448,7 @@ class FrigateApp:
                     self.detection_out_events[name],
                     self.detected_frames_queue,
                     self.camera_metrics[name],
+                    self.ptz_metrics[name],
                 ),
             )
             camera_process.daemon = True
@@ -551,6 +582,7 @@ class FrigateApp:
             sys.exit(1)
         self.start_detectors()
         self.start_video_output_processor()
+        self.start_ptz_autotracker()
         self.start_detected_frames_processor()
         self.start_camera_processors()
         self.start_camera_capture_processes()
@@ -595,6 +627,7 @@ class FrigateApp:

         self.dispatcher.stop()
         self.detected_frames_processor.join()
+        self.ptz_autotracker_thread.join()
         self.event_processor.join()
         self.event_cleanup.join()
         self.stats_emitter.join()
@@ -5,8 +5,8 @@ from abc import ABC, abstractmethod
 from typing import Any, Callable

 from frigate.config import FrigateConfig
-from frigate.ptz import OnvifCommandEnum, OnvifController
-from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
+from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes, PTZMetricsTypes
 from frigate.util.services import restart_frigate

 logger = logging.getLogger(__name__)
@@ -40,12 +40,14 @@ class Dispatcher:
         onvif: OnvifController,
         camera_metrics: dict[str, CameraMetricsTypes],
         feature_metrics: dict[str, FeatureMetricsTypes],
+        ptz_metrics: dict[str, PTZMetricsTypes],
         communicators: list[Communicator],
     ) -> None:
         self.config = config
         self.onvif = onvif
         self.camera_metrics = camera_metrics
         self.feature_metrics = feature_metrics
+        self.ptz_metrics = ptz_metrics
         self.comms = communicators

         for comm in self.comms:
@@ -55,6 +57,7 @@ class Dispatcher:
             "audio": self._on_audio_command,
             "detect": self._on_detect_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
+            "ptz_autotracker": self._on_ptz_autotracker_command,
             "motion": self._on_motion_command,
             "motion_contour_area": self._on_motion_contour_area_command,
             "motion_threshold": self._on_motion_threshold_command,
@@ -161,6 +164,23 @@ class Dispatcher:

         self.publish(f"{camera_name}/improve_contrast/state", payload, retain=True)

+    def _on_ptz_autotracker_command(self, camera_name: str, payload: str) -> None:
+        """Callback for ptz_autotracker topic."""
+        ptz_autotracker_settings = self.config.cameras[camera_name].onvif.autotracking
+
+        if payload == "ON":
+            if not self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
+                logger.info(f"Turning on ptz autotracker for {camera_name}")
+                self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = True
+                ptz_autotracker_settings.enabled = True
+        elif payload == "OFF":
+            if self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value:
+                logger.info(f"Turning off ptz autotracker for {camera_name}")
+                self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = False
+                ptz_autotracker_settings.enabled = False
+
+        self.publish(f"{camera_name}/ptz_autotracker/state", payload, retain=True)
+
     def _on_motion_contour_area_command(self, camera_name: str, payload: int) -> None:
         """Callback for motion contour topic."""
         try:
@@ -69,6 +69,11 @@ class MqttClient(Communicator):  # type: ignore[misc]
                 "ON" if camera.motion.improve_contrast else "OFF",  # type: ignore[union-attr]
                 retain=True,
             )
+            self.publish(
+                f"{camera_name}/ptz_autotracker/state",
+                "ON" if camera.onvif.autotracking.enabled else "OFF",
+                retain=True,
+            )
             self.publish(
                 f"{camera_name}/motion_threshold/state",
                 camera.motion.threshold,  # type: ignore[union-attr]
@@ -152,6 +157,7 @@ class MqttClient(Communicator):  # type: ignore[misc]
             "audio",
             "motion",
             "improve_contrast",
+            "ptz_autotracker",
             "motion_threshold",
             "motion_contour_area",
         ]
@@ -128,11 +128,31 @@ class MqttConfig(FrigateBaseModel):
         return v


+class PtzAutotrackConfig(FrigateBaseModel):
+    enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
+    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    required_zones: List[str] = Field(
+        default_factory=list,
+        title="List of required zones to be entered in order to begin autotracking.",
+    )
+    return_preset: str = Field(
+        default="home",
+        title="Name of camera preset to return to when object tracking is over.",
+    )
+    timeout: int = Field(
+        default=10, title="Seconds to delay before returning to preset."
+    )
+
+
 class OnvifConfig(FrigateBaseModel):
     host: str = Field(default="", title="Onvif Host")
     port: int = Field(default=8000, title="Onvif Port")
     user: Optional[str] = Field(title="Onvif Username")
     password: Optional[str] = Field(title="Onvif Password")
+    autotracking: PtzAutotrackConfig = Field(
+        default_factory=PtzAutotrackConfig,
+        title="PTZ auto tracking config.",
+    )


 class RetainModeEnum(str, Enum):
@@ -393,6 +413,9 @@ class AudioConfig(FrigateBaseModel):
     max_not_heard: int = Field(
         default=30, title="Seconds of not hearing the type of audio to end the event."
     )
+    min_volume: int = Field(
+        default=500, title="Min volume required to run audio detection."
+    )
     listen: List[str] = Field(
         default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
     )
@@ -892,6 +915,17 @@ def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
         )


+def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
+    """Verify that required_zones are specified when autotracking is enabled."""
+    if (
+        camera_config.onvif.autotracking.enabled
+        and not camera_config.onvif.autotracking.required_zones
+    ):
+        raise ValueError(
+            f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
+        )
+
+
 class FrigateConfig(FrigateBaseModel):
     mqtt: MqttConfig = Field(title="MQTT Configuration.")
     database: DatabaseConfig = Field(
@@ -1067,6 +1101,7 @@ class FrigateConfig(FrigateBaseModel):
             verify_recording_retention(camera_config)
             verify_recording_segments_setup_with_reasonable_time(camera_config)
             verify_zone_objects_are_tracked(camera_config)
+            verify_autotrack_zones(camera_config)

             if camera_config.rtmp.enabled:
                 logger.warning(
@@ -46,3 +46,7 @@ DRIVER_INTEL_iHD = "iHD"

 MAX_SEGMENT_DURATION = 600
 MAX_PLAYLIST_SECONDS = 7200  # support 2 hour segments for a single playlist to account for cameras with inconsistent segment times

+# Queue Values
+DEFAULT_QUEUE_BUFFER_SIZE = 2000 * 1000  # 2MB
@@ -27,14 +27,17 @@ class EdgeTpuTfl(DetectionApi):
     type_key = DETECTOR_KEY

     def __init__(self, detector_config: EdgeTpuDetectorConfig):
-        device_config = {"device": "usb"}
+        device_config = {}
         if detector_config.device is not None:
             device_config = {"device": detector_config.device}

         edge_tpu_delegate = None

         try:
-            logger.info(f"Attempting to load TPU as {device_config['device']}")
+            device_type = (
+                device_config["device"] if "device" in device_config else "auto"
+            )
+            logger.info(f"Attempting to load TPU as {device_type}")
             edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
             logger.info("TPU found")
             self.interpreter = Interpreter(
@@ -78,7 +78,7 @@ class TensorRtDetector(DetectionApi):
         try:
             trt.init_libnvinfer_plugins(self.trt_logger, "")

-            ctypes.cdll.LoadLibrary("/trt-models/libyolo_layer.so")
+            ctypes.cdll.LoadLibrary("/usr/local/lib/libyolo_layer.so")
         except OSError as e:
             logger.error(
                 "ERROR: failed to load libraries. %s",
@@ -7,7 +7,7 @@ import os
 import signal
 import threading
 from types import FrameType
-from typing import Optional
+from typing import Optional, Tuple

 import numpy as np
 import requests
@@ -176,20 +176,22 @@ class AudioEventMaintainer(threading.Thread):
             return

         audio_as_float = audio.astype(np.float32)
-        waveform = audio_as_float / AUDIO_MAX_BIT_RANGE
-        model_detections = self.detector.detect(waveform)
+        rms, _ = self.calculate_audio_levels(audio_as_float)

-        self.calculate_audio_levels(audio_as_float)
+        # only run audio detection when volume is above min_volume
+        if rms >= self.config.audio.min_volume:
+            waveform = (audio / AUDIO_MAX_BIT_RANGE).astype(np.float32)
+            model_detections = self.detector.detect(waveform)

-        for label, score, _ in model_detections:
-            if label not in self.config.audio.listen:
-                continue
+            for label, score, _ in model_detections:
+                if label not in self.config.audio.listen:
+                    continue

-            self.handle_detection(label, score)
+                self.handle_detection(label, score)

         self.expire_detections()

-    def calculate_audio_levels(self, audio_as_float: np.float32) -> None:
+    def calculate_audio_levels(self, audio_as_float: np.float32) -> Tuple[float, float]:
         # Calculate RMS (Root-Mean-Square) which represents the average signal amplitude
         # Note: np.float32 isn't serializable, we must use np.float64 to publish the message
         rms = np.sqrt(np.mean(np.absolute(audio_as_float**2)))
@@ -204,6 +206,8 @@ class AudioEventMaintainer(threading.Thread):
             (f"{self.config.name}/audio/rms", float(rms))
         )

+        return float(rms), float(dBFS)
+
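The `dBFS` value returned above (the headline of this measure-and-publish-dbfs branch) is derived from the same RMS; the line that computes it falls outside this hunk, so the formula in the sketch below is an assumption based on the standard definition of dBFS relative to int16 full scale.

```python
# Standalone sketch of the RMS/dBFS math used above. The dBFS formula and
# the full-scale constant are assumptions; the diff computes rms the same
# way but does not show its dBFS line.
import numpy as np

AUDIO_MAX_BIT_RANGE = 32767.0  # int16 full scale, assumed

def audio_levels(audio: np.ndarray) -> tuple:
    audio_as_float = audio.astype(np.float32)
    # Same RMS expression as calculate_audio_levels above
    rms = np.sqrt(np.mean(np.absolute(audio_as_float**2)))
    dBFS = 20 * np.log10(rms / AUDIO_MAX_BIT_RANGE) if rms > 0 else -np.inf
    return float(rms), float(dBFS)

samples = (np.sin(np.linspace(0, 2 * np.pi * 440, 16000)) * 16000).astype(np.int16)
print(audio_levels(samples))  # roughly (11313.7, -9.2)
```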
     def handle_detection(self, label: str, score: float) -> None:
         if self.detections.get(label):
             self.detections[label][
@@ -216,7 +220,7 @@ class AudioEventMaintainer(threading.Thread):
         )

         if resp.status_code == 200:
-            event_id = resp.json()[0]["event_id"]
+            event_id = resp.json()["event_id"]
             self.detections[label] = {
                 "id": event_id,
                 "label": label,
@@ -199,7 +199,8 @@ class EventProcessor(threading.Thread):

         # only overwrite the sub_label in the database if it's set
         if event_data.get("sub_label") is not None:
-            event[Event.sub_label] = event_data["sub_label"]
+            event[Event.sub_label] = event_data["sub_label"][0]
+            event[Event.data]["sub_label_score"] = event_data["sub_label"][1]

         (
             Event.insert(event)
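The event updater above now receives `sub_label` as a `(label, score)` pair, matching the tuple produced in `object_processing.py` later in this diff. A tiny illustration of the new shape, with made-up values:

```python
# Illustration of the sub_label shape change handled above: event_data
# now carries a (label, score) tuple instead of a bare string.
event_data = {"sub_label": ("fedex", 0.84)}

if event_data.get("sub_label") is not None:
    sub_label, sub_label_score = event_data["sub_label"]
    print(sub_label, sub_label_score)  # fedex 0.84
```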
frigate/http.py (129 changed lines)
@@ -24,21 +24,26 @@ from flask import (
     make_response,
     request,
 )
-from peewee import DoesNotExist, SqliteDatabase, fn, operator
+from peewee import DoesNotExist, fn, operator
 from playhouse.shortcuts import model_to_dict
 from playhouse.sqliteq import SqliteQueueDatabase
 from tzlocal import get_localzone_name

 from frigate.config import FrigateConfig
-from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
+from frigate.const import CLIPS_DIR, CONFIG_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
 from frigate.events.external import ExternalEventProcessor
 from frigate.models import Event, Recordings, Timeline
 from frigate.object_processing import TrackedObject
 from frigate.plus import PlusApi
-from frigate.ptz import OnvifController
+from frigate.ptz.onvif import OnvifController
 from frigate.record.export import PlaybackFactorEnum, RecordingExporter
 from frigate.stats import stats_snapshot
 from frigate.storage import StorageMaintainer
-from frigate.util.builtin import clean_camera_user_pass, get_tz_modifiers
+from frigate.util.builtin import (
+    clean_camera_user_pass,
+    get_tz_modifiers,
+    update_yaml_from_url,
+)
 from frigate.util.services import ffprobe_stream, restart_frigate, vainfo_hwaccel
 from frigate.version import VERSION
@@ -49,7 +54,7 @@ bp = Blueprint("frigate", __name__)

 def create_app(
     frigate_config,
-    database: SqliteDatabase,
+    database: SqliteQueueDatabase,
     stats_tracking,
     detected_frames_processor,
     storage_maintainer: StorageMaintainer,
@@ -415,8 +420,8 @@ def get_labels():
     else:
         events = Event.select(Event.label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get labels: {e}"}), 404
         )

     labels = sorted([e.label for e in events])
@@ -430,8 +435,9 @@ def get_sub_labels():
     try:
         events = Event.select(Event.sub_label).distinct()
     except Exception as e:
-        return jsonify(
-            {"success": False, "message": f"Failed to get sub_labels: {e}"}, "404"
+        return make_response(
+            jsonify({"success": False, "message": f"Failed to get sub_labels: {e}"}),
+            404,
         )

     sub_labels = [e.sub_label for e in events]
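Context for the repeated `make_response` changes in this file: `jsonify(body, 404)` does not set an HTTP status; Flask serializes multiple positional arguments as a JSON array and still returns 200. Wrapping with `make_response(jsonify(body), 404)` sets a real status code. A small sketch of the difference:

```python
# Sketch contrasting the buggy and fixed response patterns in this file.
from flask import Flask, jsonify, make_response

app = Flask(__name__)

@app.route("/buggy")
def buggy():
    # Serializes to the JSON array [{"success": false}, "404"] with HTTP
    # 200; the intended status code is silently lost.
    return jsonify({"success": False}, "404")

@app.route("/fixed")
def fixed():
    # Returns the JSON object with a real HTTP 404 status.
    return make_response(jsonify({"success": False}), 404)
```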
@@ -864,12 +870,17 @@ def events():
 @bp.route("/events/<camera_name>/<label>/create", methods=["POST"])
 def create_event(camera_name, label):
     if not camera_name or not current_app.frigate_config.cameras.get(camera_name):
-        return jsonify(
-            {"success": False, "message": f"{camera_name} is not a valid camera."}, 404
+        return make_response(
+            jsonify(
+                {"success": False, "message": f"{camera_name} is not a valid camera."}
+            ),
+            404,
         )

     if not label:
-        return jsonify({"success": False, "message": f"{label} must be set."}, 404)
+        return make_response(
+            jsonify({"success": False, "message": f"{label} must be set."}), 404
+        )

     json: dict[str, any] = request.get_json(silent=True) or {}

@@ -887,17 +898,19 @@ def create_event(camera_name, label):
             frame,
         )
     except Exception as e:
         logger.error(f"The error is {e}")
-        return jsonify(
-            {"success": False, "message": f"An unknown error occurred: {e}"}, 404
+        return make_response(
+            jsonify({"success": False, "message": f"An unknown error occurred: {e}"}),
+            404,
         )

-    return jsonify(
-        {
-            "success": True,
-            "message": "Successfully created event.",
-            "event_id": event_id,
-        },
+    return make_response(
+        jsonify(
+            {
+                "success": True,
+                "message": "Successfully created event.",
+                "event_id": event_id,
+            }
+        ),
+        200,
     )

@@ -910,11 +923,16 @@ def end_event(event_id):
         end_time = json.get("end_time", datetime.now().timestamp())
         current_app.external_processor.finish_manual_event(event_id, end_time)
     except Exception:
-        return jsonify(
-            {"success": False, "message": f"{event_id} must be set and valid."}, 404
+        return make_response(
+            jsonify(
+                {"success": False, "message": f"{event_id} must be set and valid."}
+            ),
+            404,
         )

-    return jsonify({"success": True, "message": "Event successfully ended."}, 200)
+    return make_response(
+        jsonify({"success": True, "message": "Event successfully ended."}), 200
+    )
@bp.route("/config")
|
||||
@ -1025,6 +1043,48 @@ def config_save():
|
||||
return "Config successfully saved.", 200
|
||||
|
||||
|
||||
@bp.route("/config/set", methods=["PUT"])
|
||||
def config_set():
|
||||
config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")
|
||||
|
||||
# Check if we can use .yaml instead of .yml
|
||||
config_file_yaml = config_file.replace(".yml", ".yaml")
|
||||
|
||||
if os.path.isfile(config_file_yaml):
|
||||
config_file = config_file_yaml
|
||||
|
||||
with open(config_file, "r") as f:
|
||||
old_raw_config = f.read()
|
||||
f.close()
|
||||
|
||||
try:
|
||||
update_yaml_from_url(config_file, request.url)
|
||||
with open(config_file, "r") as f:
|
||||
new_raw_config = f.read()
|
||||
f.close()
|
||||
# Validate the config schema
|
||||
try:
|
||||
FrigateConfig.parse_raw(new_raw_config)
|
||||
except Exception:
|
||||
with open(config_file, "w") as f:
|
||||
f.write(old_raw_config)
|
||||
f.close()
|
||||
return make_response(
|
||||
jsonify(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"\nConfig Error:\n\n{str(traceback.format_exc())}",
|
||||
}
|
||||
),
|
||||
400,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Error updating config: {e}")
|
||||
return "Error updating config", 500
|
||||
|
||||
return "Config successfully updated", 200
|
||||
|
||||
|
||||
@bp.route("/config/schema.json")
|
||||
def config_schema():
|
||||
return current_app.response_class(
|
||||
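A hedged usage sketch for the new `PUT /config/set` endpoint. The query format is an assumption inferred from `update_yaml_from_url`, which appears to map query parameters onto YAML paths, and the `/api` prefix depends on how Frigate's HTTP server is proxied in your deployment:

```python
# Hypothetical call to the new endpoint; verify the path prefix and the
# query-parameter-to-YAML-path mapping against your Frigate instance.
import requests

resp = requests.put(
    "http://frigate.local:5000/api/config/set",
    params={"cameras.ptzcamera.onvif.autotracking.enabled": "True"},
)
print(resp.status_code, resp.text)
```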
@@ -1099,10 +1159,14 @@ def latest_frame(camera_name):
         frame = current_app.detected_frames_processor.get_current_frame(
             camera_name, draw_options
         )
+        retry_interval = float(
+            current_app.frigate_config.cameras.get(camera_name).ffmpeg.retry_interval
+            or 10
+        )

         if frame is None or datetime.now().timestamp() > (
             current_app.detected_frames_processor.get_current_frame_time(camera_name)
-            + 10
+            + retry_interval
         ):
             if current_app.camera_error_image is None:
                 error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
@@ -1570,21 +1634,24 @@ def ffprobe():
     path_param = request.args.get("paths", "")

     if not path_param:
-        return jsonify(
-            {"success": False, "message": "Path needs to be provided."}, "404"
+        return make_response(
+            jsonify({"success": False, "message": "Path needs to be provided."}), 404
         )

     if path_param.startswith("camera"):
         camera = path_param[7:]

         if camera not in current_app.frigate_config.cameras.keys():
-            return jsonify(
-                {"success": False, "message": f"{camera} is not a valid camera."}, "404"
+            return make_response(
+                jsonify(
+                    {"success": False, "message": f"{camera} is not a valid camera."}
+                ),
+                404,
             )

         if not current_app.frigate_config.cameras[camera].enabled:
-            return jsonify(
-                {"success": False, "message": f"{camera} is not enabled."}, "404"
+            return make_response(
+                jsonify({"success": False, "message": f"{camera} is not enabled."}), 404
             )

     paths = map(
@ -38,7 +38,9 @@ class Event(Model): # type: ignore[misc]
|
||||
IntegerField()
|
||||
) # TODO remove when columns can be dropped without rebuilding table
|
||||
retain_indefinitely = BooleanField(default=False)
|
||||
ratio = FloatField(default=1.0)
|
||||
ratio = FloatField(
|
||||
default=1.0
|
||||
) # TODO remove when columns can be dropped without rebuilding table
|
||||
plus_id = CharField(max_length=30)
|
||||
model_hash = CharField(max_length=32)
|
||||
detector_type = CharField(max_length=32)

@ -22,6 +22,7 @@ from frigate.config import (
)
from frigate.const import CLIPS_DIR
from frigate.events.maintainer import EventTypeEnum
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.util.image import (
    SharedMemoryFrameManager,
    area,

@ -111,7 +112,7 @@ class TrackedObject:
        self.zone_presence = {}
        self.current_zones = []
        self.entered_zones = []
        self.attributes = set()
        self.attributes = defaultdict(float)
        self.false_positive = True
        self.has_clip = False
        self.has_snapshot = False

@ -143,6 +144,7 @@ class TrackedObject:
    def update(self, current_frame_time, obj_data):
        thumb_update = False
        significant_change = False
        autotracker_update = False
        # if the object is not in the current frame, add a 0.0 to the score history
        if obj_data["frame_time"] != current_frame_time:
            self.score_history.append(0.0)

@ -205,15 +207,19 @@ class TrackedObject:

        # maintain attributes
        for attr in obj_data["attributes"]:
            self.attributes.add(attr["label"])
            if self.attributes[attr["label"]] < attr["score"]:
                self.attributes[attr["label"]] = attr["score"]

        # populate the sub_label for car with first logo if it exists
        if self.obj_data["label"] == "car" and "sub_label" not in self.obj_data:
            recognized_logos = self.attributes.intersection(
                set(["ups", "fedex", "amazon"])
            )
        # populate the sub_label for car with highest scoring logo
        if self.obj_data["label"] == "car":
            recognized_logos = {
                k: self.attributes[k]
                for k in ["ups", "fedex", "amazon"]
                if k in self.attributes
            }
            if len(recognized_logos) > 0:
                self.obj_data["sub_label"] = recognized_logos.pop()
                max_logo = max(recognized_logos, key=recognized_logos.get)
                self.obj_data["sub_label"] = (max_logo, recognized_logos[max_logo])

        # check for significant change
        if not self.false_positive:

@ -236,9 +242,15 @@ class TrackedObject:
            if self.obj_data["frame_time"] - self.previous["frame_time"] > 60:
                significant_change = True

            # update autotrack at half fps
            if self.obj_data["frame_time"] - self.previous["frame_time"] > (
                1 / (self.camera_config.detect.fps / 2)
            ):
                autotracker_update = True

        self.obj_data.update(obj_data)
        self.current_zones = current_zones
        return (thumb_update, significant_change)
        return (thumb_update, significant_change, autotracker_update)

    def to_dict(self, include_thumbnail: bool = False):
        (self.thumbnail_data["frame_time"] if self.thumbnail_data is not None else 0.0)

@ -266,7 +278,7 @@ class TrackedObject:
            "entered_zones": self.entered_zones.copy(),
            "has_clip": self.has_clip,
            "has_snapshot": self.has_snapshot,
            "attributes": list(self.attributes),
            "attributes": self.attributes,
            "current_attributes": self.obj_data["attributes"],
        }

@ -437,7 +449,11 @@ def zone_filtered(obj: TrackedObject, object_config):
# Maintains the state of a camera
class CameraState:
    def __init__(
        self, name, config: FrigateConfig, frame_manager: SharedMemoryFrameManager
        self,
        name,
        config: FrigateConfig,
        frame_manager: SharedMemoryFrameManager,
        ptz_autotracker_thread: PtzAutoTrackerThread,
    ):
        self.name = name
        self.config = config
@ -455,6 +471,7 @@ class CameraState:
        self.regions = []
        self.previous_frame_id = None
        self.callbacks = defaultdict(list)
        self.ptz_autotracker_thread = ptz_autotracker_thread

    def get_current_frame(self, draw_options={}):
        with self.current_frame_lock:
@ -476,6 +493,21 @@ class CameraState:
            thickness = 1
            color = (255, 0, 0)

            # draw thicker box around ptz autotracked object
            if (
                self.camera_config.onvif.autotracking.enabled
                and self.ptz_autotracker_thread.ptz_autotracker.tracked_object[
                    self.name
                ]
                is not None
                and obj["id"]
                == self.ptz_autotracker_thread.ptz_autotracker.tracked_object[
                    self.name
                ].obj_data["id"]
            ):
                thickness = 5
                color = self.config.model.colormap[obj["label"]]

            # draw the bounding boxes on the frame
            box = obj["box"]
            draw_box_with_label(

@ -589,10 +621,14 @@ class CameraState:

        for id in updated_ids:
            updated_obj = tracked_objects[id]
            thumb_update, significant_update = updated_obj.update(
            thumb_update, significant_update, autotracker_update = updated_obj.update(
                frame_time, current_detections[id]
            )

            if autotracker_update or significant_update:
                for c in self.callbacks["autotrack"]:
                    c(self.name, updated_obj, frame_time)

            if thumb_update:
                # ensure this frame is stored in the cache
                if (

@ -733,6 +769,7 @@ class TrackedObjectProcessor(threading.Thread):
        event_processed_queue,
        video_output_queue,
        recordings_info_queue,
        ptz_autotracker_thread,
        stop_event,
    ):
        threading.Thread.__init__(self)

@ -748,6 +785,7 @@ class TrackedObjectProcessor(threading.Thread):
        self.camera_states: dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()
        self.last_motion_detected: dict[str, float] = {}
        self.ptz_autotracker_thread = ptz_autotracker_thread

        def start(camera, obj: TrackedObject, current_frame_time):
            self.event_queue.put(

@ -774,6 +812,9 @@ class TrackedObjectProcessor(threading.Thread):
                )
            )

        def autotrack(camera, obj: TrackedObject, current_frame_time):
            self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)

        def end(camera, obj: TrackedObject, current_frame_time):
            # populate has_snapshot
            obj.has_snapshot = self.should_save_snapshot(camera, obj)

@ -822,6 +863,7 @@ class TrackedObjectProcessor(threading.Thread):
                "type": "end",
            }
            self.dispatcher.publish("events", json.dumps(message), retain=False)
            self.ptz_autotracker_thread.ptz_autotracker.end_object(camera, obj)

            self.event_queue.put(
                (

@ -858,8 +900,11 @@ class TrackedObjectProcessor(threading.Thread):
            self.dispatcher.publish(f"{camera}/{object_name}", status, retain=False)

        for camera in self.config.cameras.keys():
            camera_state = CameraState(camera, self.config, self.frame_manager)
            camera_state = CameraState(
                camera, self.config, self.frame_manager, self.ptz_autotracker_thread
            )
            camera_state.on("start", start)
            camera_state.on("autotrack", autotrack)
            camera_state.on("update", update)
            camera_state.on("end", end)
            camera_state.on("snapshot", snapshot)
443 frigate/ptz/autotrack.py Normal file
@ -0,0 +1,443 @@
"""Automatically pan, tilt, and zoom on detected objects via onvif."""

import copy
import logging
import math
import queue
import threading
import time
from functools import partial
from multiprocessing.synchronize import Event as MpEvent

import cv2
import numpy as np
from norfair.camera_motion import MotionEstimator, TranslationTransformationGetter

from frigate.config import CameraConfig, FrigateConfig
from frigate.ptz.onvif import OnvifController
from frigate.types import PTZMetricsTypes
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union

logger = logging.getLogger(__name__)


def ptz_moving_at_frame_time(frame_time, ptz_start_time, ptz_stop_time):
    # Determine if the PTZ was in motion at the set frame time
    # for non ptz/autotracking cameras, this will always return False
    # ptz_start_time is initialized to 0 on startup and only changes
    # when autotracking movements are made

    # the offset "primes" the motion estimator with a few frames before movement
    offset = 0.5

    return (ptz_start_time != 0.0 and frame_time >= ptz_start_time - offset) and (
        ptz_stop_time == 0.0 or (ptz_start_time - offset <= frame_time <= ptz_stop_time)
    )
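
# Illustrative aside: a few calls with made-up timestamps, for a move that
# starts at t=100.0 and stops at t=103.0 (the 0.5 s offset widens the start):
print(ptz_moving_at_frame_time(99.7, 100.0, 103.0))   # True  (primed window)
print(ptz_moving_at_frame_time(101.5, 100.0, 103.0))  # True  (mid-move)
print(ptz_moving_at_frame_time(104.0, 100.0, 103.0))  # False (move finished)
print(ptz_moving_at_frame_time(104.0, 100.0, 0.0))    # True  (stop not yet recorded)
print(ptz_moving_at_frame_time(50.0, 0.0, 0.0))       # False (no move ever made)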


class PtzMotionEstimator:
    def __init__(
        self, config: CameraConfig, ptz_metrics: dict[str, PTZMetricsTypes]
    ) -> None:
        self.frame_manager = SharedMemoryFrameManager()
        self.norfair_motion_estimator = None
        self.camera_config = config
        self.coord_transformations = None
        self.ptz_metrics = ptz_metrics
        self.ptz_start_time = self.ptz_metrics["ptz_start_time"]
        self.ptz_stop_time = self.ptz_metrics["ptz_stop_time"]

        self.ptz_metrics["ptz_reset"].set()
        logger.debug(f"Motion estimator init for cam: {config.name}")

    def motion_estimator(self, detections, frame_time, camera_name):
        # If we've just started up or returned to our preset, reset motion estimator for new tracking session
        if self.ptz_metrics["ptz_reset"].is_set():
            self.ptz_metrics["ptz_reset"].clear()
            logger.debug("Motion estimator reset")
            # homography is nice (zooming) but slow, translation is pan/tilt only but fast.
            self.norfair_motion_estimator = MotionEstimator(
                transformations_getter=TranslationTransformationGetter(),
                min_distance=30,
                max_points=900,
            )
            self.coord_transformations = None

        if ptz_moving_at_frame_time(
            frame_time, self.ptz_start_time.value, self.ptz_stop_time.value
        ):
            logger.debug(
                f"Motion estimator running for {camera_name} - frame time: {frame_time}, {self.ptz_start_time.value}, {self.ptz_stop_time.value}"
            )

            frame_id = f"{camera_name}{frame_time}"
            yuv_frame = self.frame_manager.get(
                frame_id, self.camera_config.frame_shape_yuv
            )

            frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2GRAY_I420)

            # mask out detections for better motion estimation
            mask = np.ones(frame.shape[:2], frame.dtype)

            detection_boxes = [x[2] for x in detections]
            for detection in detection_boxes:
                x1, y1, x2, y2 = detection
                mask[y1:y2, x1:x2] = 0

            # merge camera config motion mask with detections. Norfair function needs 0,1 mask
            mask = np.bitwise_and(mask, self.camera_config.motion.mask).clip(max=1)

            # Norfair estimator function needs color so it can convert it right back to gray
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGRA)

            self.coord_transformations = self.norfair_motion_estimator.update(
                frame, mask
            )

            self.frame_manager.close(frame_id)

            logger.debug(
                f"Motion estimator transformation: {self.coord_transformations.rel_to_abs((0,0))}"
            )

        return self.coord_transformations
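
# Illustrative aside: the masking step above in isolation, with a dummy frame
# and invented boxes (the config mask here is all-pass, unlike a real setup):
import numpy as np

frame = np.zeros((1080, 1920), dtype=np.uint8)
config_mask = np.full((1080, 1920), 255, dtype=np.uint8)

mask = np.ones(frame.shape[:2], frame.dtype)
for x1, y1, x2, y2 in [(100, 200, 300, 500), (1500, 50, 1700, 400)]:
    mask[y1:y2, x1:x2] = 0  # hide detections from the feature tracker

mask = np.bitwise_and(mask, config_mask).clip(max=1)  # Norfair wants {0, 1}
print(int(mask.sum()))  # pixels still eligible for feature tracking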


class PtzAutoTrackerThread(threading.Thread):
    def __init__(
        self,
        config: FrigateConfig,
        onvif: OnvifController,
        ptz_metrics: dict[str, PTZMetricsTypes],
        stop_event: MpEvent,
    ) -> None:
        threading.Thread.__init__(self)
        self.name = "ptz_autotracker"
        self.ptz_autotracker = PtzAutoTracker(config, onvif, ptz_metrics)
        self.stop_event = stop_event
        self.config = config

    def run(self):
        while not self.stop_event.is_set():
            for camera_name, cam in self.config.cameras.items():
                if cam.onvif.autotracking.enabled:
                    self.ptz_autotracker.camera_maintenance(camera_name)
                else:
                    # disabled dynamically by mqtt
                    if self.ptz_autotracker.tracked_object.get(camera_name):
                        self.ptz_autotracker.tracked_object[camera_name] = None
                        self.ptz_autotracker.tracked_object_previous[camera_name] = None
            time.sleep(1)
        logger.info("Exiting autotracker...")


class PtzAutoTracker:
    def __init__(
        self,
        config: FrigateConfig,
        onvif: OnvifController,
        ptz_metrics: PTZMetricsTypes,
    ) -> None:
        self.config = config
        self.onvif = onvif
        self.ptz_metrics = ptz_metrics
        self.tracked_object: dict[str, object] = {}
        self.tracked_object_previous: dict[str, object] = {}
        self.previous_frame_time = None
        self.object_types = {}
        self.required_zones = {}
        self.move_queues = {}
        self.move_threads = {}
        self.autotracker_init = {}

        # if cam is set to autotrack, onvif should be set up
        for camera_name, cam in self.config.cameras.items():
            self.autotracker_init[camera_name] = False
            if cam.onvif.autotracking.enabled:
                self._autotracker_setup(cam, camera_name)

    def _autotracker_setup(self, cam, camera_name):
        logger.debug(f"Autotracker init for cam: {camera_name}")

        self.object_types[camera_name] = cam.onvif.autotracking.track
        self.required_zones[camera_name] = cam.onvif.autotracking.required_zones

        self.tracked_object[camera_name] = None
        self.tracked_object_previous[camera_name] = None

        self.move_queues[camera_name] = queue.Queue()

        if not self.onvif.cams[camera_name]["init"]:
            if not self.onvif._init_onvif(camera_name):
                logger.warning(f"Unable to initialize onvif for {camera_name}")
                cam.onvif.autotracking.enabled = False
                self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = False

                return

            if not self.onvif.cams[camera_name]["relative_fov_supported"]:
                cam.onvif.autotracking.enabled = False
                self.ptz_metrics[camera_name]["ptz_autotracker_enabled"].value = False
                logger.warning(
                    f"Disabling autotracking for {camera_name}: FOV relative movement not supported"
                )

                return

        # movement thread per camera
        if not self.move_threads or not self.move_threads[camera_name]:
            self.move_threads[camera_name] = threading.Thread(
                name=f"move_thread_{camera_name}",
                target=partial(self._process_move_queue, camera_name),
            )
            self.move_threads[camera_name].daemon = True
            self.move_threads[camera_name].start()

        self.autotracker_init[camera_name] = True

    def _process_move_queue(self, camera):
        while True:
            try:
                if self.move_queues[camera].qsize() > 1:
                    # Accumulate values since last moved
                    pan = 0
                    tilt = 0

                    while not self.move_queues[camera].empty():
                        frame_time, queued_pan, queued_tilt = self.move_queues[
                            camera
                        ].queue[0]

                        # if we're receiving move requests during a PTZ move, ignore them
                        if ptz_moving_at_frame_time(
                            frame_time,
                            self.ptz_metrics[camera]["ptz_start_time"].value,
                            self.ptz_metrics[camera]["ptz_stop_time"].value,
                        ):
                            self.move_queues[camera].get()

                            # instead of dequeueing this might be a good place to preemptively move based
                            # on an estimate - for fast moving objects, etc.
                            logger.debug(
                                f"Move queue: PTZ moving, dequeueing move request - frame time: {frame_time}, queued pan: {queued_pan}, queued tilt: {queued_tilt}, final pan: {pan}, final tilt: {tilt}"
                            )

                        else:
                            # TODO: this may need rethinking
                            logger.debug(
                                f"Move queue: PTZ NOT moving, frame time: {frame_time}, queued pan: {queued_pan}, queued tilt: {queued_tilt}, final pan: {pan}, final tilt: {tilt}"
                            )
                            _, queued_pan, queued_tilt = self.move_queues[camera].get()

                            # If exceeding the movement range, keep it in the queue and move now
                            if (
                                abs(pan + queued_pan) > 1.0
                                or abs(tilt + queued_tilt) > 1.0
                            ):
                                logger.debug("Pan or tilt value exceeds 1.0")
                                break

                            pan += queued_pan
                            tilt += queued_tilt

                else:
                    move_data = self.move_queues[camera].get()
                    frame_time, pan, tilt = move_data

                # on some cameras with cheaper motors it seems like small values can cause jerky movement
                # TODO: double check, might not need this
                if abs(pan) > 0.02 or abs(tilt) > 0.02:
                    self.onvif._move_relative(camera, pan, tilt, 1)
                else:
                    logger.debug(f"Not moving, pan and tilt too small: {pan}, {tilt}")

                # Wait until the camera finishes moving
                while not self.ptz_metrics[camera]["ptz_stopped"].is_set():
                    # check if ptz is moving
                    self.onvif.get_camera_status(camera)

            except queue.Empty:
                time.sleep(0.1)
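
# Illustrative aside: the accumulation rule above, minus the threading. Deltas
# queued while idle are summed until one would push past the ONVIF [-1, 1] range:
pending = [(0.25, 0.125), (0.5, 0.25), (0.5, 0.0)]  # invented (pan, tilt) deltas

pan = tilt = 0.0
for queued_pan, queued_tilt in pending:
    if abs(pan + queued_pan) > 1.0 or abs(tilt + queued_tilt) > 1.0:
        break  # leave the remainder queued and move now
    pan += queued_pan
    tilt += queued_tilt

print(pan, tilt)  # 0.75 0.375 -- the third delta stays queued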

    def _enqueue_move(self, camera, frame_time, pan, tilt):
        move_data = (frame_time, pan, tilt)
        if (
            frame_time > self.ptz_metrics[camera]["ptz_start_time"].value
            and frame_time > self.ptz_metrics[camera]["ptz_stop_time"].value
        ):
            logger.debug(f"enqueue pan: {pan}, enqueue tilt: {tilt}")
            self.move_queues[camera].put(move_data)

    def _autotrack_move_ptz(self, camera, obj):
        camera_config = self.config.cameras[camera]

        # frame width and height
        camera_width = camera_config.frame_shape[1]
        camera_height = camera_config.frame_shape[0]

        # Normalize coordinates. top right of the fov is (1,1), center is (0,0), bottom left is (-1, -1).
        pan = ((obj.obj_data["centroid"][0] / camera_width) - 0.5) * 2
        tilt = (0.5 - (obj.obj_data["centroid"][1] / camera_height)) * 2

        # ideas: check object velocity for camera speed?
        self._enqueue_move(camera, obj.obj_data["frame_time"], pan, tilt)
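
# Illustrative aside: the normalization at an assumed 1920x1080 detect
# resolution; a centroid in the upper-right quadrant yields positive pan/tilt:
camera_width, camera_height = 1920, 1080
centroid = (1440, 270)

pan = ((centroid[0] / camera_width) - 0.5) * 2    # (0.75 - 0.5) * 2 = 0.5
tilt = (0.5 - (centroid[1] / camera_height)) * 2  # (0.5 - 0.25) * 2 = 0.5
print(pan, tilt)  # 0.5 0.5 -> half the FOV right and up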

    def autotrack_object(self, camera, obj):
        camera_config = self.config.cameras[camera]

        if camera_config.onvif.autotracking.enabled:
            if not self.autotracker_init[camera]:
                self._autotracker_setup(self.config.cameras[camera], camera)

            # either this is a brand new object that's on our camera, has our label, entered the zone, is not a false positive,
            # and is not initially motionless - or one we're already tracking, which assumes all those things are already true
            if (
                # new object
                self.tracked_object[camera] is None
                and obj.camera == camera
                and obj.obj_data["label"] in self.object_types[camera]
                and set(obj.entered_zones) & set(self.required_zones[camera])
                and not obj.previous["false_positive"]
                and not obj.false_positive
                and self.tracked_object_previous[camera] is None
                and obj.obj_data["motionless_count"] == 0
            ):
                logger.debug(
                    f"Autotrack: New object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                )
                self.tracked_object[camera] = obj
                self.tracked_object_previous[camera] = copy.deepcopy(obj)
                self.previous_frame_time = obj.obj_data["frame_time"]
                self._autotrack_move_ptz(camera, obj)

                return

            if (
                # already tracking an object
                self.tracked_object[camera] is not None
                and self.tracked_object_previous[camera] is not None
                and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"]
                and obj.obj_data["frame_time"] != self.previous_frame_time
            ):
                self.previous_frame_time = obj.obj_data["frame_time"]
                # Don't move ptz if Euclidean distance from object to center of frame is
                # less than 15% of the larger dimension (width or height) of the frame,
                # multiplied by a scaling factor for object size.
                # Adjusting this percentage slightly lower will effectively cause the camera to move
                # more often to keep the object in the center. Raising the percentage will cause less
                # movement and will be more flexible with objects not quite being centered.
                # TODO: there's probably a better way to approach this
                distance = math.sqrt(
                    (obj.obj_data["centroid"][0] - camera_config.detect.width / 2) ** 2
                    + (obj.obj_data["centroid"][1] - camera_config.detect.height / 2)
                    ** 2
                )

                obj_width = obj.obj_data["box"][2] - obj.obj_data["box"][0]
                obj_height = obj.obj_data["box"][3] - obj.obj_data["box"][1]

                max_obj = max(obj_width, obj_height)
                max_frame = max(camera_config.detect.width, camera_config.detect.height)

                # larger objects should lower the threshold, smaller objects should raise it
                scaling_factor = 1 - (max_obj / max_frame)

                distance_threshold = 0.15 * (max_frame) * scaling_factor
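
# Illustrative aside: plugging in assumed numbers at a 1280x720 detect
# resolution, a 320x480 px person gives scaling_factor = 1 - 480/1280 = 0.625
# and a dead-zone radius of 0.15 * 1280 * 0.625 = 120 px:
import math

detect_w, detect_h = 1280, 720  # assumed detect resolution
box = (400, 100, 720, 580)      # assumed object box (x1, y1, x2, y2)
centroid = (560, 340)

distance = math.sqrt(
    (centroid[0] - detect_w / 2) ** 2 + (centroid[1] - detect_h / 2) ** 2
)  # ~82.5 px from frame center

max_obj = max(box[2] - box[0], box[3] - box[1])          # 480
max_frame = max(detect_w, detect_h)                      # 1280
scaling_factor = 1 - (max_obj / max_frame)               # 0.625
distance_threshold = 0.15 * max_frame * scaling_factor   # 120.0

print(distance < distance_threshold)  # True -> close enough, don't move the PTZ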

                iou = intersection_over_union(
                    self.tracked_object_previous[camera].obj_data["box"],
                    obj.obj_data["box"],
                )

                logger.debug(
                    f"Distance: {distance}, threshold: {distance_threshold}, iou: {iou}"
                )

                if distance < distance_threshold and iou > 0.2:
                    logger.debug(
                        f"Autotrack: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                    )
                    return

                logger.debug(
                    f"Autotrack: Existing object (need to move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                )
                self.tracked_object_previous[camera] = copy.deepcopy(obj)
                self._autotrack_move_ptz(camera, obj)

                return

            if (
                # The tracker lost an object, so let's check the previous object's region and compare it with the incoming object
                # If it's within bounds, start tracking that object.
                # Should we check region (maybe too broad) or expand the previous object's box a bit and check that?
                self.tracked_object[camera] is None
                and obj.camera == camera
                and obj.obj_data["label"] in self.object_types[camera]
                and not obj.previous["false_positive"]
                and not obj.false_positive
                and obj.obj_data["motionless_count"] == 0
                and self.tracked_object_previous[camera] is not None
            ):
                self.previous_frame_time = obj.obj_data["frame_time"]
                if (
                    intersection_over_union(
                        self.tracked_object_previous[camera].obj_data["region"],
                        obj.obj_data["box"],
                    )
                    < 0.2
                ):
                    logger.debug(
                        f"Autotrack: Reacquired object: {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
                    )
                    self.tracked_object[camera] = obj
                    self.tracked_object_previous[camera] = copy.deepcopy(obj)
                    self._autotrack_move_ptz(camera, obj)

                return

    def end_object(self, camera, obj):
        if self.config.cameras[camera].onvif.autotracking.enabled:
            if (
                self.tracked_object[camera] is not None
                and obj.obj_data["id"] == self.tracked_object[camera].obj_data["id"]
            ):
                logger.debug(
                    f"Autotrack: End object: {obj.obj_data['id']} {obj.obj_data['box']}"
                )
                self.tracked_object[camera] = None

    def camera_maintenance(self, camera):
        # calls get_camera_status to check/update ptz movement
        # returns camera to preset after timeout when tracking is over
        autotracker_config = self.config.cameras[camera].onvif.autotracking

        if not self.autotracker_init[camera]:
            self._autotracker_setup(self.config.cameras[camera], camera)
        # regularly update camera status
        if not self.ptz_metrics[camera]["ptz_stopped"].is_set():
            self.onvif.get_camera_status(camera)

        # return to preset if tracking is over
        if (
            self.tracked_object[camera] is None
            and self.tracked_object_previous[camera] is not None
            and (
                # might want to use a different timestamp here?
                time.time()
                - self.tracked_object_previous[camera].obj_data["frame_time"]
                > autotracker_config.timeout
            )
            and autotracker_config.return_preset
        ):
            self.ptz_metrics[camera]["ptz_stopped"].wait()
            logger.debug(
                f"Autotrack: Time is {time.time()}, returning to preset: {autotracker_config.return_preset}"
            )
            self.onvif._move_to_preset(
                camera,
                autotracker_config.return_preset.lower(),
            )
            self.ptz_metrics[camera]["ptz_reset"].set()
            self.tracked_object_previous[camera] = None
@ -1,12 +1,15 @@
"""Configure and control camera via onvif."""

import datetime
import logging
import site
from enum import Enum

import numpy
from onvif import ONVIFCamera, ONVIFError

from frigate.config import FrigateConfig
from frigate.types import PTZMetricsTypes

logger = logging.getLogger(__name__)

@ -26,8 +29,11 @@ class OnvifCommandEnum(str, Enum):


class OnvifController:
    def __init__(self, config: FrigateConfig) -> None:
    def __init__(
        self, config: FrigateConfig, ptz_metrics: dict[str, PTZMetricsTypes]
    ) -> None:
        self.cams: dict[str, ONVIFCamera] = {}
        self.ptz_metrics = ptz_metrics

        for cam_name, cam in config.cameras.items():
            if not cam.enabled:
@ -68,12 +74,51 @@ class OnvifController:
        ptz = onvif.create_ptz_service()
        request = ptz.create_type("GetConfigurationOptions")
        request.ConfigurationToken = profile.PTZConfiguration.token
        ptz_config = ptz.GetConfigurationOptions(request)

        # setup moving request
        fov_space_id = next(
            (
                i
                for i, space in enumerate(
                    ptz_config.Spaces.RelativePanTiltTranslationSpace
                )
                if "TranslationSpaceFov" in space["URI"]
            ),
            None,
        )

        # setup continuous moving request
        move_request = ptz.create_type("ContinuousMove")
        move_request.ProfileToken = profile.token
        self.cams[camera_name]["move_request"] = move_request

        # setup relative moving request for autotracking
        move_request = ptz.create_type("RelativeMove")
        move_request.ProfileToken = profile.token
        if move_request.Translation is None and fov_space_id is not None:
            move_request.Translation = ptz.GetStatus(
                {"ProfileToken": profile.token}
            ).Position
            move_request.Translation.PanTilt.space = ptz_config["Spaces"][
                "RelativePanTiltTranslationSpace"
            ][fov_space_id]["URI"]
            move_request.Translation.Zoom.space = ptz_config["Spaces"][
                "RelativeZoomTranslationSpace"
            ][0]["URI"]
        if move_request.Speed is None:
            move_request.Speed = ptz.GetStatus({"ProfileToken": profile.token}).Position
        self.cams[camera_name]["relative_move_request"] = move_request

        # setup absolute moving request for autotracking
        move_request = ptz.create_type("AbsoluteMove")
        move_request.ProfileToken = profile.token
        self.cams[camera_name]["absolute_move_request"] = move_request

        # status request for autotracking
        status_request = ptz.create_type("GetStatus")
        status_request.ProfileToken = profile.token
        self.cams[camera_name]["status_request"] = status_request

        # setup existing presets
        try:
            presets: list[dict] = ptz.GetPresets({"ProfileToken": profile.token})

@ -94,6 +139,20 @@ class OnvifController:
        if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace:
            supported_features.append("zoom")

        if ptz_config.Spaces and ptz_config.Spaces.RelativePanTiltTranslationSpace:
            supported_features.append("pt-r")

        if ptz_config.Spaces and ptz_config.Spaces.RelativeZoomTranslationSpace:
            supported_features.append("zoom-r")

        if fov_space_id is not None:
            supported_features.append("pt-r-fov")
            self.cams[camera_name][
                "relative_fov_range"
            ] = ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id]

        self.cams[camera_name]["relative_fov_supported"] = fov_space_id is not None

        self.cams[camera_name]["features"] = supported_features

        self.cams[camera_name]["init"] = True
@ -143,12 +202,71 @@ class OnvifController:

        onvif.get_service("ptz").ContinuousMove(move_request)

    def _move_relative(self, camera_name: str, pan, tilt, speed) -> None:
        if not self.cams[camera_name]["relative_fov_supported"]:
            logger.error(f"{camera_name} does not support ONVIF RelativeMove (FOV).")
            return

        logger.debug(f"{camera_name} called RelativeMove: pan: {pan} tilt: {tilt}")

        if self.cams[camera_name]["active"]:
            logger.warning(
                f"{camera_name} is already performing an action, not moving..."
            )
            return

        self.cams[camera_name]["active"] = True
        self.ptz_metrics[camera_name]["ptz_stopped"].clear()
        logger.debug(f"PTZ start time: {datetime.datetime.now().timestamp()}")
        self.ptz_metrics[camera_name][
            "ptz_start_time"
        ].value = datetime.datetime.now().timestamp()
        self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0
        onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
        move_request = self.cams[camera_name]["relative_move_request"]

        # function takes in -1 to 1 for pan and tilt, interpolate to the values of the camera.
        # The onvif spec says this can report as +INF and -INF, so this may need to be modified
        pan = numpy.interp(
            pan,
            [-1, 1],
            [
                self.cams[camera_name]["relative_fov_range"]["XRange"]["Min"],
                self.cams[camera_name]["relative_fov_range"]["XRange"]["Max"],
            ],
        )
        tilt = numpy.interp(
            tilt,
            [-1, 1],
            [
                self.cams[camera_name]["relative_fov_range"]["YRange"]["Min"],
                self.cams[camera_name]["relative_fov_range"]["YRange"]["Max"],
            ],
        )

        move_request.Speed = {
            "PanTilt": {
                "x": speed,
                "y": speed,
            },
            "Zoom": 0,
        }

        move_request.Translation.PanTilt.x = pan
        move_request.Translation.PanTilt.y = tilt
        move_request.Translation.Zoom.x = 0

        onvif.get_service("ptz").RelativeMove(move_request)

        self.cams[camera_name]["active"] = False

    def _move_to_preset(self, camera_name: str, preset: str) -> None:
        if preset not in self.cams[camera_name]["presets"]:
            logger.error(f"{preset} is not a valid preset for {camera_name}")
            return

        self.cams[camera_name]["active"] = True
        self.ptz_metrics[camera_name]["ptz_stopped"].clear()
        move_request = self.cams[camera_name]["move_request"]
        onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
        preset_token = self.cams[camera_name]["presets"][preset]
@ -158,6 +276,7 @@ class OnvifController:
                "PresetToken": preset_token,
            }
        )
        self.ptz_metrics[camera_name]["ptz_stopped"].set()
        self.cams[camera_name]["active"] = False

    def _zoom(self, camera_name: str, command: OnvifCommandEnum) -> None:

@ -216,3 +335,45 @@ class OnvifController:
            "features": self.cams[camera_name]["features"],
            "presets": list(self.cams[camera_name]["presets"].keys()),
        }

    def get_camera_status(self, camera_name: str) -> dict[str, any]:
        if camera_name not in self.cams.keys():
            logger.error(f"Onvif is not setup for {camera_name}")
            return {}

        if not self.cams[camera_name]["init"]:
            self._init_onvif(camera_name)

        onvif: ONVIFCamera = self.cams[camera_name]["onvif"]
        status_request = self.cams[camera_name]["status_request"]
        status = onvif.get_service("ptz").GetStatus(status_request)

        if status.MoveStatus.PanTilt == "IDLE" and status.MoveStatus.Zoom == "IDLE":
            self.cams[camera_name]["active"] = False
            if not self.ptz_metrics[camera_name]["ptz_stopped"].is_set():
                self.ptz_metrics[camera_name]["ptz_stopped"].set()

                logger.debug(f"PTZ stop time: {datetime.datetime.now().timestamp()}")

                self.ptz_metrics[camera_name][
                    "ptz_stop_time"
                ].value = datetime.datetime.now().timestamp()
        else:
            self.cams[camera_name]["active"] = True
            if self.ptz_metrics[camera_name]["ptz_stopped"].is_set():
                self.ptz_metrics[camera_name]["ptz_stopped"].clear()

                logger.debug(f"PTZ start time: {datetime.datetime.now().timestamp()}")

                self.ptz_metrics[camera_name][
                    "ptz_start_time"
                ].value = datetime.datetime.now().timestamp()
                self.ptz_metrics[camera_name]["ptz_stop_time"].value = 0

        return {
            "pan": status.Position.PanTilt.x,
            "tilt": status.Position.PanTilt.y,
            "zoom": status.Position.Zoom.x,
            "pantilt_moving": status.MoveStatus.PanTilt,
            "zoom_moving": status.MoveStatus.Zoom,
        }

@ -5,8 +5,10 @@ import numpy as np
from norfair import Detection, Drawable, Tracker, draw_boxes
from norfair.drawing.drawer import Drawer

from frigate.config import DetectConfig
from frigate.config import CameraConfig
from frigate.ptz.autotrack import PtzMotionEstimator
from frigate.track import ObjectTracker
from frigate.types import PTZMetricsTypes
from frigate.util.image import intersection_over_union

@ -54,12 +56,21 @@ def frigate_distance(detection: Detection, tracked_object) -> float:


class NorfairTracker(ObjectTracker):
    def __init__(self, config: DetectConfig):
    def __init__(
        self,
        config: CameraConfig,
        ptz_metrics: PTZMetricsTypes,
    ):
        self.tracked_objects = {}
        self.disappeared = {}
        self.positions = {}
        self.max_disappeared = config.max_disappeared
        self.detect_config = config
        self.max_disappeared = config.detect.max_disappeared
        self.camera_config = config
        self.detect_config = config.detect
        self.ptz_metrics = ptz_metrics
        self.ptz_autotracker_enabled = ptz_metrics["ptz_autotracker_enabled"]
        self.ptz_motion_estimator = {}
        self.camera_name = config.name
        self.track_id_map = {}
        # TODO: could also initialize a tracker per object class if there
        # was a good reason to have different distance calculations
@ -69,6 +80,10 @@ class NorfairTracker(ObjectTracker):
            initialization_delay=0,
            hit_counter_max=self.max_disappeared,
        )
        if self.ptz_autotracker_enabled.value:
            self.ptz_motion_estimator = PtzMotionEstimator(
                self.camera_config, self.ptz_metrics
            )

    def register(self, track_id, obj):
        rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))

@ -230,7 +245,22 @@ class NorfairTracker(ObjectTracker):
                )
            )

        tracked_objects = self.tracker.update(detections=norfair_detections)
        coord_transformations = None

        if self.ptz_autotracker_enabled.value:
            # we must have been enabled by mqtt, so set up the estimator
            if not self.ptz_motion_estimator:
                self.ptz_motion_estimator = PtzMotionEstimator(
                    self.camera_config, self.ptz_metrics
                )

            coord_transformations = self.ptz_motion_estimator.motion_estimator(
                detections, frame_time, self.camera_name
            )

        tracked_objects = self.tracker.update(
            detections=norfair_detections, coord_transformations=coord_transformations
        )

        # update or create new tracks
        active_ids = []

@ -1,5 +1,6 @@
from multiprocessing.context import Process
from multiprocessing.sharedctypes import Synchronized
from multiprocessing.synchronize import Event
from typing import Optional, TypedDict

from faster_fifo import Queue

@ -25,6 +26,14 @@ class CameraMetricsTypes(TypedDict):
    skipped_fps: Synchronized


class PTZMetricsTypes(TypedDict):
    ptz_autotracker_enabled: Synchronized
    ptz_stopped: Event
    ptz_reset: Event
    ptz_start_time: Synchronized
    ptz_stop_time: Synchronized
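
# Illustrative aside: one plausible way such a dict is populated with
# multiprocessing primitives (hypothetical wiring, not the exact code in app.py):
import multiprocessing as mp

ptz_metrics = {
    "ptz_autotracker_enabled": mp.Value("i", 1),
    "ptz_stopped": mp.Event(),
    "ptz_reset": mp.Event(),
    "ptz_start_time": mp.Value("d", 0.0),
    "ptz_stop_time": mp.Value("d", 0.0),
}
ptz_metrics["ptz_stopped"].set()  # cameras start out idle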


class FeatureMetricsTypes(TypedDict):
    audio_enabled: Synchronized
    record_enabled: Synchronized

@ -1,23 +1,19 @@
"""Utilities for builtin types manipulation."""

import copy
import ctypes
import datetime
import logging
import multiprocessing
import re
import shlex
import time
import urllib.parse
from collections import Counter
from collections.abc import Mapping
from queue import Empty, Full
from typing import Any, Tuple

import numpy as np
import pytz
import yaml
from faster_fifo import DEFAULT_CIRCULAR_BUFFER_SIZE, DEFAULT_TIMEOUT
from faster_fifo import Queue as FFQueue
from ruamel.yaml import YAML

from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS

@ -63,50 +59,6 @@ class EventsPerSecond:
            del self._timestamps[0]


class LimitedQueue(FFQueue):
    def __init__(
        self,
        maxsize=0,
        max_size_bytes=DEFAULT_CIRCULAR_BUFFER_SIZE,
        loads=None,
        dumps=None,
    ):
        super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps)
        self.maxsize = maxsize
        self.size = multiprocessing.RawValue(
            ctypes.c_int, 0
        )  # Add a counter for the number of items in the queue

    def put(self, x, block=True, timeout=DEFAULT_TIMEOUT):
        if self.maxsize > 0 and self.size.value >= self.maxsize:
            if block:
                start_time = time.time()
                while self.size.value >= self.maxsize:
                    remaining = timeout - (time.time() - start_time)
                    if remaining <= 0.0:
                        raise Full
                    time.sleep(min(remaining, 0.1))
            else:
                raise Full
        self.size.value += 1
        return super().put(x, block=block, timeout=timeout)

    def get(self, block=True, timeout=DEFAULT_TIMEOUT):
        if self.size.value <= 0 and not block:
            raise Empty
        self.size.value -= 1
        return super().get(block=block, timeout=timeout)

    def qsize(self):
        return self.size

    def empty(self):
        return self.qsize() == 0

    def full(self):
        return self.qsize() == self.maxsize

def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
    """
    :param dct1: First dict to merge
@ -224,3 +176,76 @@ def to_relative_box(
        (box[2] - box[0]) / width,  # w
        (box[3] - box[1]) / height,  # h
    )


def create_mask(frame_shape, mask):
    mask_img = np.zeros(frame_shape, np.uint8)
    mask_img[:] = 255


def update_yaml_from_url(file_path, url):
    parsed_url = urllib.parse.urlparse(url)
    query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)

    for key_path_str, new_value_list in query_string.items():
        key_path = key_path_str.split(".")
        for i in range(len(key_path)):
            try:
                index = int(key_path[i])
                key_path[i] = (key_path[i - 1], index)
                key_path.pop(i - 1)
            except ValueError:
                pass
        new_value = new_value_list[0]
        update_yaml_file(file_path, key_path, new_value)


def update_yaml_file(file_path, key_path, new_value):
    yaml = YAML()
    with open(file_path, "r") as f:
        data = yaml.load(f)

    data = update_yaml(data, key_path, new_value)

    with open(file_path, "w") as f:
        yaml.dump(data, f)


def update_yaml(data, key_path, new_value):
    temp = data
    for key in key_path[:-1]:
        if isinstance(key, tuple):
            if key[0] not in temp:
                temp[key[0]] = [{}] * max(1, key[1] + 1)
            elif len(temp[key[0]]) <= key[1]:
                temp[key[0]] += [{}] * (key[1] - len(temp[key[0]]) + 1)
            temp = temp[key[0]][key[1]]
        else:
            if key not in temp:
                temp[key] = {}
            temp = temp[key]

    last_key = key_path[-1]
    if new_value == "":
        if isinstance(last_key, tuple):
            del temp[last_key[0]][last_key[1]]
        else:
            del temp[last_key]
    else:
        if isinstance(last_key, tuple):
            if last_key[0] not in temp:
                temp[last_key[0]] = [{}] * max(1, last_key[1] + 1)
            elif len(temp[last_key[0]]) <= last_key[1]:
                temp[last_key[0]] += [{}] * (last_key[1] - len(temp[last_key[0]]) + 1)
            temp[last_key[0]][last_key[1]] = new_value
        else:
            if (
                last_key in temp
                and isinstance(temp[last_key], dict)
                and isinstance(new_value, dict)
            ):
                temp[last_key].update(new_value)
            else:
                temp[last_key] = new_value

    return data
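
# Illustrative aside: numeric path segments become (parent_key, index) tuples,
# so "cameras.front.motion.mask.0" indexes into a list. Invented data:
data = {"cameras": {"front": {}}}
key_path = ["cameras", "front", "motion", ("mask", 0)]

update_yaml(data, key_path, "0,461,3,0,1919,0")
print(data)  # {'cameras': {'front': {'motion': {'mask': ['0,461,3,0,1919,0']}}}}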

@ -22,8 +22,10 @@ from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection import RemoteObjectDetector
from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.types import PTZMetricsTypes
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import (
    FrameManager,

@ -461,6 +463,7 @@ def track_camera(
    result_connection,
    detected_objects_queue,
    process_info,
    ptz_metrics,
):
    stop_event = mp.Event()

@ -497,7 +500,7 @@ def track_camera(
        name, labelmap, detection_queue, result_connection, model_config, stop_event
    )

    object_tracker = NorfairTracker(config.detect)
    object_tracker = NorfairTracker(config, ptz_metrics)

    frame_manager = SharedMemoryFrameManager()

@ -518,6 +521,7 @@ def track_camera(
        detection_enabled,
        motion_enabled,
        stop_event,
        ptz_metrics,
    )

    logger.info(f"{name}: exiting subprocess")

@ -742,6 +746,7 @@ def process_frames(
    detection_enabled: mp.Value,
    motion_enabled: mp.Value,
    stop_event,
    ptz_metrics: PTZMetricsTypes,
    exit_on_empty: bool = False,
):
    fps = process_info["process_fps"]
@ -777,8 +782,19 @@ def process_frames(
            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        # look for motion if enabled
        motion_boxes = motion_detector.detect(frame) if motion_enabled.value else []
        # look for motion if enabled and ptz is not moving
        # ptz_moving_at_frame_time() always returns False for
        # non ptz/autotracking cameras
        motion_boxes = (
            motion_detector.detect(frame)
            if motion_enabled.value
            and not ptz_moving_at_frame_time(
                frame_time,
                ptz_metrics["ptz_start_time"].value,
                ptz_metrics["ptz_stop_time"].value,
            )
            else []
        )

        regions = []
        consolidated_detections = []

@ -1,9 +1,12 @@
# NVidia TensorRT Support (amd64 only)
nvidia-pyindex; platform_machine == 'x86_64'
nvidia-tensorrt == 8.4.1.5; platform_machine == 'x86_64'
cuda-python == 11.7; platform_machine == 'x86_64'
--extra-index-url 'https://pypi.nvidia.com'
numpy < 1.24; platform_machine == 'x86_64'
tensorrt == 8.5.3.*; platform_machine == 'x86_64'
cuda-python == 11.8; platform_machine == 'x86_64'
cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.7.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.*; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.7.*; platform_machine == 'x86_64'
nvidia-cuda-nvrtc-cu11 == 11.7.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

@ -15,6 +15,7 @@ pydantic == 1.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
PyYAML == 6.0
pytz == 2023.3
ruamel.yaml == 0.17.*
tzlocal == 5.0.*
types-PyYAML == 6.0.*
requests == 2.31.*

@ -23,7 +23,7 @@ export default function CameraControlPanel({ camera = '' }) {
      return;
    }

    sendPtz(`preset-${currentPreset}`);
    sendPtz(`preset_${currentPreset}`);
    setCurrentPreset('');
  };

20 web/src/icons/Score.jsx Normal file
@ -0,0 +1,20 @@
import { h } from 'preact';
import { memo } from 'preact/compat';

export function Score({ className = 'h-6 w-6', stroke = 'currentColor', fill = 'currentColor', onClick = () => {} }) {
  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      className={className}
      fill={fill}
      viewBox="0 0 24 24"
      stroke={stroke}
      onClick={onClick}
    >
      <title>percent</title>
      <path d="M12,9A3,3 0 0,0 9,12A3,3 0 0,0 12,15A3,3 0 0,0 15,12A3,3 0 0,0 12,9M19,19H15V21H19A2,2 0 0,0 21,19V15H19M19,3H15V5H19V9H21V5A2,2 0 0,0 19,3M5,5H9V3H5A2,2 0 0,0 3,5V9H5M5,15H3V19A2,2 0 0,0 5,21H9V19H5V15Z" />
    </svg>
  );
}

export default memo(Score);

@ -6,7 +6,7 @@ import Heading from '../components/Heading';
import WebRtcPlayer from '../components/WebRtcPlayer';
import '../components/MsePlayer';
import useSWR from 'swr';
import { useMemo } from 'preact/hooks';
import { useMemo, useState } from 'preact/hooks';
import CameraControlPanel from '../components/CameraControlPanel';
import { baseUrl } from '../api/baseUrl';

@ -26,16 +26,19 @@ export default function Birdseye() {
      .map(([_, camera]) => camera.name);
  }, [config]);

  const [isMaxWidth, setIsMaxWidth] = useState(false);

  if (!config || !sourceIsLoaded) {
    return <ActivityIndicator />;
  }

  let player;
  const playerClass = isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full';
  if (viewSource == 'mse' && config.birdseye.restream) {
    if ('MediaSource' in window) {
      player = (
        <Fragment>
          <div className={ptzCameras.length ? 'max-w-5xl xl:w-1/2' : 'max-w-5xl'}>
          <div className={ptzCameras.length && !isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full'}>
            <video-stream
              mode="mse"
              src={new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=birdseye`)}
@ -52,10 +55,10 @@ export default function Birdseye() {
        </Fragment>
      );
    }
  } else if (viewSource == 'webrtc' && config.birdseye.restream) {
  } else if (viewSource == 'webrtc') {
    player = (
      <Fragment>
        <div className={ptzCameras.length ? 'max-w-5xl xl:w-1/2' : 'max-w-5xl'}>
        <div className={ptzCameras.length && config.birdseye.restream && !isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full'}>
          <WebRtcPlayer camera="birdseye" />
        </div>
      </Fragment>
@ -63,7 +66,7 @@ export default function Birdseye() {
  } else {
    player = (
      <Fragment>
        <div className={ptzCameras.length ? 'max-w-5xl xl:w-1/2' : 'max-w-5xl'}>
        <div className={ptzCameras.length && config.birdseye.restream && !isMaxWidth ? 'max-w-5xl xl:w-1/2' : 'w-full'}>
          <JSMpegPlayer camera="birdseye" />
        </div>
      </Fragment>
@ -77,26 +80,37 @@ export default function Birdseye() {
          Birdseye
        </Heading>

        <button
          className="bg-gray-500 hover:bg-gray-700 text-white font-bold py-2 px-4 rounded hidden md:inline"
          onClick={() => setIsMaxWidth(!isMaxWidth)}
        >
          Toggle width
        </button>

        {config.birdseye.restream && (
          <select
            className="basis-1/8 cursor-pointer rounded dark:bg-slate-800"
            value={viewSource}
            onChange={(e) => setViewSource(e.target.value)}
            key="width-changer"
          >
            {sourceValues.map((item) => (
              <option key={item} value={item}>
                {item}
              </option>
            ))}

          </select>
        )}
      </div>

      <div className="xl:flex justify-between">
        {player}
        <div className={playerClass}> {/* Use dynamic class */}
          {player}
        </div>

        {ptzCameras.length ? (
          <div className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow p-4 w-full sm:w-min xl:h-min xl:w-1/2">
          <div className="dark:bg-gray-800 shadow-md hover:shadow-lg rounded-lg transition-shadow p-4 sm:w-min xl:h-min {playerClass}">
            <Heading size="sm">Control Panel</Heading>
            {ptzCameras.map((camera) => (
              <div className="p-4" key={camera}>

@ -7,7 +7,7 @@ import { useResizeObserver } from '../hooks';
import { useCallback, useMemo, useRef, useState } from 'preact/hooks';
import { useApiHost } from '../api';
import useSWR from 'swr';

import axios from 'axios';
export default function CameraMasks({ camera }) {
  const { data: config } = useSWR('config');
  const apiHost = useApiHost();
@ -95,12 +95,53 @@ export default function CameraMasks({ camera }) {
    [motionMaskPoints, setMotionMaskPoints]
  );

  const handleCopyMotionMasks = useCallback(async () => {
    await window.navigator.clipboard.writeText(` motion:
    mask:
${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`);
  const handleCopyMotionMasks = useCallback(() => {
    const textToCopy = ` motion:
    mask:
${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).join('\n')}`;

    if (window.navigator.clipboard && window.navigator.clipboard.writeText) {
      // Use Clipboard API if available
      window.navigator.clipboard.writeText(textToCopy).catch((err) => {
        throw new Error('Failed to copy text: ', err);
      });
    } else {
      // Fallback to document.execCommand('copy')
      const textarea = document.createElement('textarea');
      textarea.value = textToCopy;
      document.body.appendChild(textarea);
      textarea.select();

      try {
        const successful = document.execCommand('copy');
        if (!successful) {
          throw new Error('Failed to copy text');
        }
      } catch (err) {
        throw new Error('Failed to copy text: ', err);
      }

      document.body.removeChild(textarea);
    }
  }, [motionMaskPoints]);

  const handleSaveMotionMasks = useCallback(async () => {
    try {
      const queryParameters = motionMaskPoints
        .map((mask, index) => `cameras.${camera}.motion.mask.${index}=${polylinePointsToPolyline(mask)}`)
        .join('&');
      const endpoint = `config/set?${queryParameters}`;
      const response = await axios.put(endpoint);
      if (response.status === 200) {
        // handle successful response
      }
    } catch (error) {
      // handle error
      //console.error(error);
    }
  }, [camera, motionMaskPoints]);

  // Zone methods
  const handleEditZone = useCallback(
    (key) => {
@ -127,15 +168,53 @@ ${motionMaskPoints.map((mask) => ` - ${polylinePointsToPolyline(mask)}`).jo
  );

  const handleCopyZones = useCallback(async () => {
    await window.navigator.clipboard.writeText(` zones:
    const textToCopy = ` zones:
${Object.keys(zonePoints)
  .map(
    (zoneName) => ` ${zoneName}:
    coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}`
  )
  .join('\n')}`);
    coordinates: ${polylinePointsToPolyline(zonePoints[zoneName])}`).join('\n')}`;

    if (window.navigator.clipboard && window.navigator.clipboard.writeText) {
      // Use Clipboard API if available
      window.navigator.clipboard.writeText(textToCopy).catch((err) => {
        throw new Error('Failed to copy text: ', err);
      });
    } else {
      // Fallback to document.execCommand('copy')
      const textarea = document.createElement('textarea');
      textarea.value = textToCopy;
      document.body.appendChild(textarea);
      textarea.select();

      try {
        const successful = document.execCommand('copy');
        if (!successful) {
          throw new Error('Failed to copy text');
        }
      } catch (err) {
        throw new Error('Failed to copy text: ', err);
      }

      document.body.removeChild(textarea);
    }
  }, [zonePoints]);

  const handleSaveZones = useCallback(async () => {
    try {
      const queryParameters = Object.keys(zonePoints)
        .map((zoneName) => `cameras.${camera}.zones.${zoneName}.coordinates=${polylinePointsToPolyline(zonePoints[zoneName])}`)
        .join('&');
      const endpoint = `config/set?${queryParameters}`;
      const response = await axios.put(endpoint);
      if (response.status === 200) {
        // handle successful response
      }
    } catch (error) {
      // handle error
      //console.error(error);
    }
  }, [camera, zonePoints]);

  // Object methods
  const handleEditObjectMask = useCallback(
    (key, subkey) => {
@ -175,6 +254,23 @@ ${Object.keys(objectMaskPoints)
  .join('\n')}`);
  }, [objectMaskPoints]);

  const handleSaveObjectMasks = useCallback(async () => {
    try {
      const queryParameters = Object.keys(objectMaskPoints)
        .filter((objectName) => objectMaskPoints[objectName].length > 0)
        .map((objectName, index) => `cameras.${camera}.objects.filters.${objectName}.mask.${index}=${polylinePointsToPolyline(objectMaskPoints[objectName])}`)
        .join('&');
      const endpoint = `config/set?${queryParameters}`;
      const response = await axios.put(endpoint);
      if (response.status === 200) {
        // handle successful response
      }
    } catch (error) {
      // handle error
      //console.error(error);
    }
  }, [camera, objectMaskPoints]);

  const handleAddToObjectMask = useCallback(
    (key) => {
      const newObjectMaskPoints = { ...objectMaskPoints, [key]: [...objectMaskPoints[key], []] };

@ -246,6 +342,7 @@ ${Object.keys(objectMaskPoints)
        editing={editing}
        title="Motion masks"
        onCopy={handleCopyMotionMasks}
        onSave={handleSaveMotionMasks}
        onCreate={handleAddMask}
        onEdit={handleEditMask}
        onRemove={handleRemoveMask}
@ -258,6 +355,7 @@ ${Object.keys(objectMaskPoints)
        editing={editing}
        title="Zones"
        onCopy={handleCopyZones}
        onSave={handleSaveZones}
        onCreate={handleAddZone}
        onEdit={handleEditZone}
        onRemove={handleRemoveZone}
@ -272,6 +370,7 @@ ${Object.keys(objectMaskPoints)
        title="Object masks"
        onAdd={handleAddToObjectMask}
        onCopy={handleCopyObjectMasks}
        onSave={handleSaveObjectMasks}
        onCreate={handleAddObjectMask}
        onEdit={handleEditObjectMask}
        onRemove={handleRemoveObjectMask}

@ -407,6 +506,7 @@ function MaskValues({
  title,
  onAdd,
  onCopy,
  onSave,
  onCreate,
  onEdit,
  onRemove,
@ -455,6 +555,8 @@ function MaskValues({
    [onAdd]
  );


  return (
    <div className="overflow-hidden" onMouseOver={handleMousein} onMouseOut={handleMouseout}>
      <div className="flex space-x-4">
@ -463,6 +565,7 @@ function MaskValues({
        </Heading>
        <Button onClick={onCopy}>Copy</Button>
        <Button onClick={onCreate}>Add</Button>
        <Button onClick={onSave}>Save</Button>
      </div>
      <pre className="relative overflow-auto font-mono text-gray-900 dark:text-gray-100 rounded bg-gray-100 dark:bg-gray-800 p-2">
        {yamlPrefix}
|
||||
@ -30,6 +30,7 @@ import TimeAgo from '../components/TimeAgo';
import Timepicker from '../components/TimePicker';
import TimelineSummary from '../components/TimelineSummary';
import TimelineEventOverlay from '../components/TimelineEventOverlay';
import { Score } from '../icons/Score';

const API_LIMIT = 25;

@ -602,13 +603,10 @@ export default function Events({ path, ...props }) {
            <div className="m-2 flex grow">
              <div className="flex flex-col grow">
                <div className="capitalize text-lg font-bold">
                  {event.sub_label
                    ? `${event.label.replaceAll('_', ' ')}: ${event.sub_label.replaceAll('_', ' ')}`
                    : event.label.replaceAll('_', ' ')}
                  {(event?.data?.top_score || event.top_score || 0) == 0
                    ? null
                    : ` (${((event?.data?.top_score || event.top_score) * 100).toFixed(0)}%)`}
                  {event.label.replaceAll('_', ' ')}
                  {event.sub_label ? `: ${event.sub_label.replaceAll('_', ' ')}` : null}
                </div>

                <div className="text-sm flex">
                  <Clock className="h-5 w-5 mr-2 inline" />
                  {formatUnixTimestampToDateTime(event.start_time, { ...config.ui })}
@ -624,9 +622,18 @@ export default function Events({ path, ...props }) {
                  <Camera className="h-5 w-5 mr-2 inline" />
                  {event.camera.replaceAll('_', ' ')}
                </div>
                <div className="capitalize text-sm flex align-center">
                {event.zones.length ? <div className="capitalize text-sm flex align-center">
                  <Zone className="w-5 h-5 mr-2 inline" />
                  {event.zones.join(', ').replaceAll('_', ' ')}
                </div> : null}
                <div className="capitalize text-sm flex align-center">
                  <Score className="w-5 h-5 mr-2 inline" />
                  {(event?.data?.top_score || event.top_score || 0) == 0
                    ? null
                    : `Label: ${((event?.data?.top_score || event.top_score) * 100).toFixed(0)}%`}
                  {(event?.data?.sub_label_score || 0) == 0
                    ? null
                    : `, Sub Label: ${(event?.data?.sub_label_score * 100).toFixed(0)}%`}
                </div>
              </div>
              <div class="hidden sm:flex flex-col justify-end mr-2">

@ -43,31 +43,119 @@ interface DateTimeStyle {
  strftime_fmt: string;
}

// only used as a fallback if the browser does not support dateStyle/timeStyle in Intl.DateTimeFormat
const formatMap: {
  [k: string]: {
    date: { year: 'numeric' | '2-digit'; month: 'long' | 'short' | '2-digit'; day: 'numeric' | '2-digit' };
    time: { hour: 'numeric'; minute: 'numeric'; second?: 'numeric'; timeZoneName?: 'short' | 'long' };
  };
} = {
  full: {
    date: { year: 'numeric', month: 'long', day: 'numeric' },
    time: { hour: 'numeric', minute: 'numeric', second: 'numeric', timeZoneName: 'long' },
  },
  long: {
    date: { year: 'numeric', month: 'long', day: 'numeric' },
    time: { hour: 'numeric', minute: 'numeric', second: 'numeric', timeZoneName: 'long' },
  },
  medium: {
    date: { year: 'numeric', month: 'short', day: 'numeric' },
    time: { hour: 'numeric', minute: 'numeric', second: 'numeric' },
  },
  short: { date: { year: '2-digit', month: '2-digit', day: '2-digit' }, time: { hour: 'numeric', minute: 'numeric' } },
};

/**
 * Attempts to get the system's time zone using Intl.DateTimeFormat. If that fails (for instance, in environments
 * where Intl is not fully supported), it calculates the UTC offset for the current system time and returns
 * it in a string format.
 *
 * Keeping the Intl.DateTimeFormat for now, as this is the recommended way to get the time zone.
 * https://stackoverflow.com/a/34602679
 *
 * As of April 2023, the Intl.DateTimeFormat function works in 95.03% of the browsers used globally.
 * https://caniuse.com/mdn-javascript_builtins_intl_datetimeformat_resolvedoptions_computed_timezone
 *
 * @returns {string} The resolved time zone or a calculated UTC offset.
 * The returned string will either be a named time zone (e.g., "America/Los_Angeles"), or it will follow
 * the format "UTC±HH:MM".
 */
const getResolvedTimeZone = () => {
  try {
    return Intl.DateTimeFormat().resolvedOptions().timeZone;
  } catch (error) {
    const offsetMinutes = new Date().getTimezoneOffset();
    return `UTC${offsetMinutes < 0 ? '+' : '-'}${Math.floor(Math.abs(offsetMinutes) / 60)
      .toString()
      .padStart(2, '0')}:${Math.abs(offsetMinutes % 60)
      .toString()
      .padStart(2, '0')}`;
  }
};
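
A worked example of the fallback branch (the failing-Intl environment is hypothetical):

// System clock at UTC+05:30, so new Date().getTimezoneOffset() === -330:
//   sign:    -330 < 0                  -> '+'
//   hours:   floor(abs(-330) / 60) = 5 -> '05' after padStart
//   minutes: abs(-330 % 60) = 30       -> '30'
//   result:  'UTC+05:30'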
/**
 * Formats a Unix timestamp into a human-readable date/time string.
 *
 * The format of the output string is determined by a configuration object passed as an argument, which
 * may specify a time zone, 12- or 24-hour time, and various stylistic options for the date and time.
 * If these options are not specified, the function will use system defaults or sensible fallbacks.
 *
 * The function is robust to environments where the Intl API is not fully supported, and includes a
 * fallback method to create a formatted date/time string in such cases.
 *
 * @param {number} unixTimestamp - The Unix timestamp to be formatted.
 * @param {DateTimeStyle} config - User configuration object.
 * @returns {string} A formatted date/time string. If the given unixTimestamp is not a valid number,
 * the function returns 'Invalid time' rather than throwing.
 */
export const formatUnixTimestampToDateTime = (unixTimestamp: number, config: DateTimeStyle): string => {
  const { timezone, time_format, date_style, time_style, strftime_fmt } = config;
  const locale = window.navigator?.language || 'en-us';

  if (isNaN(unixTimestamp)) {
    return 'Invalid time';
  }

  try {
    const date = new Date(unixTimestamp * 1000);
    const resolvedTimeZone = getResolvedTimeZone();

    // use strftime_fmt if defined in config file
    // use strftime_fmt if defined in config
    if (strftime_fmt) {
      const strftime_locale = strftime.timezone(getUTCOffset(date, timezone || Intl.DateTimeFormat().resolvedOptions().timeZone)).localizeByIdentifier(locale);
      const offset = getUTCOffset(date, timezone || resolvedTimeZone);
      const strftime_locale = strftime.timezone(offset).localizeByIdentifier(locale);
      return strftime_locale(strftime_fmt, date);
    }

    // else use Intl.DateTimeFormat
    const formatter = new Intl.DateTimeFormat(locale, {
    // DateTime format options
    const options: Intl.DateTimeFormatOptions = {
      dateStyle: date_style,
      timeStyle: time_style,
      timeZone: timezone || Intl.DateTimeFormat().resolvedOptions().timeZone,
      hour12: time_format !== 'browser' ? time_format == '12hour' : undefined,
    });
    return formatter.format(date);
};
    };

    // Only set timeZone option when resolvedTimeZone does not match UTC±HH:MM format, or when timezone is set in config
    const isUTCOffsetFormat = /^UTC[+-]\d{2}:\d{2}$/.test(resolvedTimeZone);
    if (timezone || !isUTCOffsetFormat) {
      options.timeZone = timezone || resolvedTimeZone;
    }

    const formatter = new Intl.DateTimeFormat(locale, options);
    const formattedDateTime = formatter.format(date);

    // Regex to check for existence of time. This is needed because dateStyle/timeStyle is not always supported.
    const containsTime = /\d{1,2}:\d{1,2}/.test(formattedDateTime);

    // fallback if the browser does not support dateStyle/timeStyle in Intl.DateTimeFormat
    // This works even though the timezone may be undefined; it will use the runtime's default time zone
    if (!containsTime) {
      const dateOptions = { ...formatMap[date_style]?.date, timeZone: options.timeZone, hour12: options.hour12 };
      const timeOptions = { ...formatMap[time_style]?.time, timeZone: options.timeZone, hour12: options.hour12 };

      return `${date.toLocaleDateString(locale, dateOptions)} ${date.toLocaleTimeString(locale, timeOptions)}`;
    }

    return formattedDateTime;
  } catch (error) {
    return 'Invalid time';
  }
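
A usage sketch for the function above (the call and its values are illustrative, not part of this commit; exact output depends on the runtime's locale data):

// Hypothetical call; the fields mirror the options destructured above.
// strftime_fmt is left empty so the Intl.DateTimeFormat path is taken.
const label = formatUnixTimestampToDateTime(1684159200, {
  timezone: 'America/Los_Angeles',
  time_format: '12hour', // or 'browser' to defer to the locale default
  date_style: 'medium',
  time_style: 'medium',
  strftime_fmt: '',
});
// 1684159200 is 2023-05-15T14:00:00Z, so in America/Los_Angeles (UTC-07:00 in May)
// this renders roughly as "May 15, 2023, 7:00:00 AM"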
@ -122,10 +210,19 @@ export const getDurationFromTimestamps = (start_time: number, end_time: number |
 * @returns number of minutes offset from UTC
 */
const getUTCOffset = (date: Date, timezone: string): number => {
  const utcDate = new Date(date.getTime() - (date.getTimezoneOffset() * 60 * 1000));
  // If timezone is in UTC±HH:MM format, parse it to get offset
  const utcOffsetMatch = timezone.match(/^UTC([+-])(\d{2}):(\d{2})$/);
  if (utcOffsetMatch) {
    const hours = parseInt(utcOffsetMatch[2], 10);
    const minutes = parseInt(utcOffsetMatch[3], 10);
    return (utcOffsetMatch[1] === '+' ? 1 : -1) * (hours * 60 + minutes);
  }

  // Otherwise, calculate offset using provided timezone
  const utcDate = new Date(date.getTime() - date.getTimezoneOffset() * 60 * 1000);
  // locale of en-CA is required for proper locale format
  let iso = utcDate.toLocaleString('en-CA', { timeZone: timezone, hour12: false }).replace(', ', 'T');
  iso += `.${utcDate.getMilliseconds().toString().padStart(3, '0')}`;
  const target = new Date(`${iso}Z`);
  return (target.getTime() - utcDate.getTime()) / 60 / 1000;
}
  return (target.getTime() - utcDate.getTime()) / 60 / 1000;
};
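
Two illustrative calls (values hypothetical), one per branch of the function above:

const d = new Date('2023-05-15T14:00:00Z');

// Fast path: the UTC±HH:MM regex parses the offset directly.
getUTCOffset(d, 'UTC+05:30'); // 330
getUTCOffset(d, 'UTC-04:00'); // -240

// Named-zone path: format the instant in the target zone via
// toLocaleString('en-CA'), reparse the wall time as if it were UTC,
// and diff against the original to recover the offset in minutes.
getUTCOffset(d, 'America/New_York'); // -240 (EDT in May)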