Merge branch '0.16' into dev

.github/workflows/pull_request.yml
@@ -6,7 +6,7 @@ on:
       - "docs/**"

 env:
-  DEFAULT_PYTHON: 3.9
+  DEFAULT_PYTHON: 3.11

 jobs:
   build_devcontainer:
docker/rockchip/COCO/coco_subset_20.txt (new file)
@@ -0,0 +1,20 @@
+./subset/000000005001.jpg
+./subset/000000038829.jpg
+./subset/000000052891.jpg
+./subset/000000075612.jpg
+./subset/000000098261.jpg
+./subset/000000181542.jpg
+./subset/000000215245.jpg
+./subset/000000277005.jpg
+./subset/000000288685.jpg
+./subset/000000301421.jpg
+./subset/000000334371.jpg
+./subset/000000348481.jpg
+./subset/000000373353.jpg
+./subset/000000397681.jpg
+./subset/000000414673.jpg
+./subset/000000419312.jpg
+./subset/000000465822.jpg
+./subset/000000475732.jpg
+./subset/000000559707.jpg
+./subset/000000574315.jpg
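These twenty images are the calibration dataset for quantized builds: `conv2rknn.py` below passes `dataset="/COCO/coco_subset_20.txt"` to `rknn.build` when quantization is enabled.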
docker/rockchip/COCO/subset/ (20 new binary files):
000000005001.jpg (207 KiB), 000000038829.jpg (209 KiB), 000000052891.jpg (150 KiB),
000000075612.jpg (102 KiB), 000000098261.jpg (14 KiB), 000000181542.jpg (201 KiB),
000000215245.jpg (233 KiB), 000000277005.jpg (242 KiB), 000000288685.jpg (230 KiB),
000000301421.jpg (80 KiB), 000000334371.jpg (136 KiB), 000000348481.jpg (113 KiB),
000000373353.jpg (281 KiB), 000000397681.jpg (272 KiB), 000000414673.jpg (152 KiB),
000000419312.jpg (166 KiB), 000000465822.jpg (109 KiB), 000000475732.jpg (103 KiB),
000000559707.jpg (203 KiB), 000000574315.jpg (110 KiB)
@@ -7,22 +7,26 @@ FROM wheels as rk-wheels
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
 RUN sed -i "/https:\/\//d" /requirements-wheels.txt
+RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
 RUN python3 -m pip config set global.break-system-packages true
 RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
+RUN rm -rf /rk-wheels/opencv_python-*

 FROM deps AS rk-frigate
 ARG TARGETARCH

 RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
-    pip3 install -U /deps/rk-wheels/*.whl --break-system-packages
+    pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages

 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
+COPY docker/rockchip/COCO /COCO
+COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py

-ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
+ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/

 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffmpeg /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-6/ffprobe /usr/lib/ffmpeg/6.0/bin/
 ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"
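Presumably, `--no-deps` keeps pip from re-resolving dependencies at install time, since every required wheel was already collected into `/rk-wheels` against the main constraints file in the build stage above.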
docker/rockchip/conv2rknn.py (new file)
@@ -0,0 +1,82 @@
+import os
+
+import rknn
+import yaml
+from rknn.api import RKNN
+
+try:
+    with open(rknn.__path__[0] + "/VERSION") as file:
+        tk_version = file.read().strip()
+except FileNotFoundError:
+    pass
+
+try:
+    with open("/config/conv2rknn.yaml", "r") as config_file:
+        configuration = yaml.safe_load(config_file)
+except FileNotFoundError:
+    raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
+
+if configuration["config"] != None:
+    rknn_config = configuration["config"]
+else:
+    rknn_config = {}
+
+if not os.path.isdir("/config/model_cache/rknn_cache/onnx"):
+    raise Exception(
+        "Place the onnx models you want to convert to rknn format in /config/model_cache/rknn_cache/onnx"
+    )
+
+if "soc" not in configuration:
+    try:
+        with open("/proc/device-tree/compatible") as file:
+            soc = file.read().split(",")[-1].strip("\x00")
+    except FileNotFoundError:
+        raise Exception("Make sure to run docker in privileged mode.")
+
+    configuration["soc"] = [
+        soc,
+    ]
+
+if "quantization" not in configuration:
+    configuration["quantization"] = False
+
+if "output_name" not in configuration:
+    configuration["output_name"] = "{input_basename}"
+
+for input_filename in os.listdir("/config/model_cache/rknn_cache/onnx"):
+    for soc in configuration["soc"]:
+        quant = "i8" if configuration["quantization"] else "fp16"
+
+        input_path = "/config/model_cache/rknn_cache/onnx/" + input_filename
+        input_basename = input_filename[: input_filename.rfind(".")]
+
+        output_filename = (
+            configuration["output_name"].format(
+                quant=quant,
+                input_basename=input_basename,
+                soc=soc,
+                tk_version=tk_version,
+            )
+            + ".rknn"
+        )
+        output_path = "/config/model_cache/rknn_cache/" + output_filename
+
+        rknn_config["target_platform"] = soc
+
+        rknn = RKNN(verbose=True)
+        rknn.config(**rknn_config)
+
+        if rknn.load_onnx(model=input_path) != 0:
+            raise Exception("Error loading model.")
+
+        if (
+            rknn.build(
+                do_quantization=configuration["quantization"],
+                dataset="/COCO/coco_subset_20.txt",
+            )
+            != 0
+        ):
+            raise Exception("Error building model.")
+
+        if rknn.export_rknn(output_path) != 0:
+            raise Exception("Error exporting rknn model.")
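The `output_name` template is expanded with plain `str.format` substitution. For illustration (the model name is a placeholder):

```python
# Illustrative only: how the script's output_name template expands.
template = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"
print(template.format(
    quant="i8", input_basename="yolonas_s", soc="rk3588", tk_version="2.3.0"
))
# -> frigate-i8-yolonas_s-rk3588-v2.3.0
```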
docker/rockchip/requirements-wheels-rk.txt
@@ -1 +1,2 @@
-rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp311-cp311-linux_aarch64.whl
+rknn-toolkit2 == 2.3.0
+rknn-toolkit-lite2 == 2.3.0
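Both pins line up with the `librknnrt.so` v2.3.0 runtime added in the Dockerfile above; toolkit and runtime versions presumably need to stay in step.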
@@ -67,14 +67,15 @@ ffmpeg:

 ### Annke C800

-This camera is H.265 only. To be able to play clips on some devices (like macOS or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately direct playback in the browser is not working (yet), but the downloaded clip can be played locally.
+This camera is H.265 only. To be able to play clips on some devices (like macOS or iPhone) the H.265 stream has to be adjusted using the `apple_compatibility` config.

 ```yaml
 cameras:
   annkec800: # <------ Name the camera
     ffmpeg:
+      apple_compatibility: true # <- Adds compatibility with macOS and iPhone
       output_args:
-        record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac
+        record: preset-record-generic-audio-aac

       inputs:
         - path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
@@ -175,6 +175,16 @@ For more information on the various values across different distributions, see h
 Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'`

+#### Stats for SR-IOV devices
+
+When using virtualized GPUs via SR-IOV, additional args are needed for GPU stats to function. This can be enabled with the following config:
+
+```yaml
+telemetry:
+  stats:
+    sriov: True
+```
+
 ## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver

 VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
@@ -544,7 +544,7 @@ Hardware accelerated object detection is supported on the following SoCs:
 - RK3576
 - RK3588

-This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.0.0.beta0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
+This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as an object detection model.

 ### Prerequisites

@@ -617,7 +617,41 @@ $ cat /sys/kernel/debug/rknpu/load
 :::

 - All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
-- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder, store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires a x86 machine). Note, that there is only post-processing for the supported models.
+- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see `rknn-toolkit2`. Note that there is only post-processing for the supported models.
+
+### Converting your own onnx model to rknn format
+
+To convert an onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
+
+- Place one or more models in onnx format in the directory `config/model_cache/rknn_cache/onnx` on your docker host (this might require `sudo` privileges).
+- Save the configuration file under `config/conv2rknn.yaml` (see below for details).
+- Run `docker exec <frigate_container_id> python3 /opt/conv2rknn.py`. If the conversion was successful, the rknn models will be placed in `config/model_cache/rknn_cache`.
+
+This is an example configuration file that you need to adjust to your specific onnx model:
+
+```yaml
+soc: ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
+quantization: false
+
+output_name: "{input_basename}"
+
+config:
+  mean_values: [[0, 0, 0]]
+  std_values: [[255, 255, 255]]
+  quant_img_rgb2bgr: true
+```
+
+Explanation of the parameters:
+
+- `soc`: A list of all SoCs you want to build the rknn model for. If you don't specify this parameter, the script tries to find out your SoC and builds the rknn model for this one.
+- `quantization`: true: 8 bit integer (i8) quantization, false: 16 bit float (fp16). Default: false.
+- `output_name`: The output name of the model. The following variables are available:
+  - `quant`: "i8" or "fp16" depending on the config
+  - `input_basename`: the basename of the input model (e.g. "my_model" if the input model is called "my_model.onnx")
+  - `soc`: the SoC this model was built for (e.g. "rk3588")
+  - `tk_version`: version of `rknn-toolkit2` (e.g. "2.3.0")
+  - **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
+- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters, have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).

 ## Hailo-8l

@@ -242,6 +242,8 @@ ffmpeg:
   # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage
   # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout.
   retry_interval: 10
+  # Optional: Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players. (default: shown below)
+  apple_compatibility: false

# Optional: Detect configuration
# NOTE: Can be overridden at the camera level
@@ -811,11 +813,13 @@ telemetry:
     - lo
   # Optional: Configure system stats
   stats:
-    # Enable AMD GPU stats (default: shown below)
+    # Optional: Enable AMD GPU stats (default: shown below)
     amd_gpu_stats: True
-    # Enable Intel GPU stats (default: shown below)
+    # Optional: Enable Intel GPU stats (default: shown below)
     intel_gpu_stats: True
-    # Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
+    # Optional: Treat GPU as SR-IOV to fix GPU stats (default: shown below)
+    sriov: False
+    # Optional: Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below)
     # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
     network_bandwidth: False
   # Optional: Enable the latest version outbound check (default: shown below)
@@ -2,6 +2,9 @@

 import logging
 import os
+import random
+import shutil
+import string

 from fastapi import APIRouter, Request, UploadFile
 from fastapi.responses import JSONResponse
@@ -22,7 +25,13 @@ def get_faces():

     for name in os.listdir(FACE_DIR):
         face_dict[name] = []
-        for file in os.listdir(os.path.join(FACE_DIR, name)):
+
+        face_dir = os.path.join(FACE_DIR, name)
+
+        if not os.path.isdir(face_dir):
+            continue
+
+        for file in os.listdir(face_dir):
             face_dict[name].append(file)

     return JSONResponse(status_code=200, content=face_dict)
@@ -38,6 +47,39 @@ async def register_face(request: Request, name: str, file: UploadFile):
     )


+@router.post("/faces/train/{name}/classify")
+def train_face(name: str, body: dict = None):
+    json: dict[str, any] = body or {}
+    training_file = os.path.join(
+        FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
+    )
+
+    if not training_file or not os.path.isfile(training_file):
+        return JSONResponse(
+            content=(
+                {
+                    "success": False,
+                    "message": f"Invalid filename or no file exists: {training_file}",
+                }
+            ),
+            status_code=404,
+        )
+
+    rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
+    new_name = f"{name}-{rand_id}.webp"
+    new_file = os.path.join(FACE_DIR, f"{name}/{new_name}")
+    shutil.move(training_file, new_file)
+    return JSONResponse(
+        content=(
+            {
+                "success": True,
+                "message": f"Successfully saved {training_file} as {new_name}.",
+            }
+        ),
+        status_code=200,
+    )
+
+
 @router.post("/faces/{name}/delete")
 def deregister_faces(request: Request, name: str, body: dict = None):
     json: dict[str, any] = body or {}
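For reference, a minimal client call against the new training endpoint. This is a sketch only: host, port, face name, and training file name are placeholders, and the `/api` prefix is assumed to match the internal calls used elsewhere in this diff:

```python
import requests

# Placeholders: adjust host/port for your deployment; the training file must
# already exist in the face library's train/ folder.
resp = requests.post(
    "http://127.0.0.1:5000/api/faces/train/jane/classify",
    json={"training_file": "1712345678.123456-abcd12-jane-0.92-0.88.webp"},
)
print(resp.status_code, resp.json())
```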
@@ -34,12 +34,14 @@ from frigate.const import (
     CLIPS_DIR,
     CONFIG_DIR,
     EXPORT_DIR,
+    FACE_DIR,
     MODEL_CACHE_DIR,
     RECORD_DIR,
     SHM_FRAMES_VAR,
 )
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.embeddings import EmbeddingsContext, manage_embeddings
+from frigate.embeddings.types import EmbeddingsMetrics
 from frigate.events.audio import AudioProcessor
 from frigate.events.cleanup import EventCleanup
 from frigate.events.external import ExternalEventProcessor
@@ -88,6 +90,9 @@ class FrigateApp:
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
         self.log_queue: Queue = mp.Queue()
         self.camera_metrics: dict[str, CameraMetrics] = {}
+        self.embeddings_metrics: EmbeddingsMetrics | None = (
+            EmbeddingsMetrics() if config.semantic_search.enabled else None
+        )
         self.ptz_metrics: dict[str, PTZMetrics] = {}
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
@@ -96,14 +101,19 @@ class FrigateApp:
         self.config = config

     def ensure_dirs(self) -> None:
-        for d in [
+        dirs = [
             CONFIG_DIR,
             RECORD_DIR,
             f"{CLIPS_DIR}/cache",
             CACHE_DIR,
             MODEL_CACHE_DIR,
             EXPORT_DIR,
-        ]:
+        ]
+
+        if self.config.face_recognition.enabled:
+            dirs.append(FACE_DIR)
+
+        for d in dirs:
             if not os.path.exists(d) and not os.path.islink(d):
                 logger.info(f"Creating directory: {d}")
                 os.makedirs(d)
@@ -229,7 +239,10 @@ class FrigateApp:
         embedding_process = util.Process(
             target=manage_embeddings,
             name="embeddings_manager",
-            args=(self.config,),
+            args=(
+                self.config,
+                self.embeddings_metrics,
+            ),
         )
         embedding_process.daemon = True
         self.embedding_process = embedding_process
@@ -491,7 +504,11 @@ class FrigateApp:
         self.stats_emitter = StatsEmitter(
             self.config,
             stats_init(
-                self.config, self.camera_metrics, self.detectors, self.processes
+                self.config,
+                self.camera_metrics,
+                self.embeddings_metrics,
+                self.detectors,
+                self.processes,
             ),
             self.stop_event,
         )
frigate/camera/activity_manager.py (new file)
@@ -0,0 +1,130 @@
+"""Manage camera activity and updating listeners."""
+
+from collections import Counter
+from typing import Callable
+
+from frigate.config.config import FrigateConfig
+
+
+class CameraActivityManager:
+    def __init__(
+        self, config: FrigateConfig, publish: Callable[[str, any], None]
+    ) -> None:
+        self.config = config
+        self.publish = publish
+        self.last_camera_activity: dict[str, dict[str, any]] = {}
+        self.camera_all_object_counts: dict[str, Counter] = {}
+        self.camera_active_object_counts: dict[str, Counter] = {}
+        self.zone_all_object_counts: dict[str, Counter] = {}
+        self.zone_active_object_counts: dict[str, Counter] = {}
+        self.all_zone_labels: dict[str, set[str]] = {}
+
+        for camera_config in config.cameras.values():
+            if not camera_config.enabled:
+                continue
+
+            self.last_camera_activity[camera_config.name] = {}
+            self.camera_all_object_counts[camera_config.name] = Counter()
+            self.camera_active_object_counts[camera_config.name] = Counter()
+
+            for zone, zone_config in camera_config.zones.items():
+                if zone not in self.all_zone_labels:
+                    self.zone_all_object_counts[zone] = Counter()
+                    self.zone_active_object_counts[zone] = Counter()
+                    self.all_zone_labels[zone] = set()
+
+                self.all_zone_labels[zone].update(zone_config.objects)
+
+    def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
+        all_objects: list[dict[str, any]] = []
+
+        for camera in new_activity.keys():
+            new_objects = new_activity[camera].get("objects", [])
+            all_objects.extend(new_objects)
+
+            if self.last_camera_activity.get(camera, {}).get("objects") != new_objects:
+                self.compare_camera_activity(camera, new_objects)
+
+        # run through every zone, getting a count of objects in that zone right now
+        for zone, labels in self.all_zone_labels.items():
+            all_zone_objects = Counter(
+                obj["label"].replace("-verified", "")
+                for obj in all_objects
+                if zone in obj["current_zones"]
+            )
+            active_zone_objects = Counter(
+                obj["label"].replace("-verified", "")
+                for obj in all_objects
+                if zone in obj["current_zones"] and not obj["stationary"]
+            )
+            any_changed = False
+
+            # run through each object and check what topics need to be updated for this zone
+            for label in labels:
+                new_count = all_zone_objects[label]
+                new_active_count = active_zone_objects[label]
+
+                if (
+                    new_count != self.zone_all_object_counts[zone][label]
+                    or label not in self.zone_all_object_counts[zone]
+                ):
+                    any_changed = True
+                    self.publish(f"{zone}/{label}", new_count)
+                    self.zone_all_object_counts[zone][label] = new_count
+
+                if (
+                    new_active_count != self.zone_active_object_counts[zone][label]
+                    or label not in self.zone_active_object_counts[zone]
+                ):
+                    any_changed = True
+                    self.publish(f"{zone}/{label}/active", new_active_count)
+                    self.zone_active_object_counts[zone][label] = new_active_count
+
+            if any_changed:
+                self.publish(f"{zone}/all", sum(list(all_zone_objects.values())))
+                self.publish(
+                    f"{zone}/all/active", sum(list(active_zone_objects.values()))
+                )
+
+        self.last_camera_activity = new_activity
+
+    def compare_camera_activity(
+        self, camera: str, new_activity: dict[str, any]
+    ) -> None:
+        all_objects = Counter(
+            obj["label"].replace("-verified", "") for obj in new_activity
+        )
+        active_objects = Counter(
+            obj["label"].replace("-verified", "")
+            for obj in new_activity
+            if not obj["stationary"]
+        )
+        any_changed = False
+
+        # run through each object and check what topics need to be updated
+        for label in self.config.cameras[camera].objects.track:
+            if label in self.config.model.all_attributes:
+                continue
+
+            new_count = all_objects[label]
+            new_active_count = active_objects[label]
+
+            if (
+                new_count != self.camera_all_object_counts[camera][label]
+                or label not in self.camera_all_object_counts[camera]
+            ):
+                any_changed = True
+                self.publish(f"{camera}/{label}", new_count)
+                self.camera_all_object_counts[camera][label] = new_count
+
+            if (
+                new_active_count != self.camera_active_object_counts[camera][label]
+                or label not in self.camera_active_object_counts[camera]
+            ):
+                any_changed = True
+                self.publish(f"{camera}/{label}/active", new_active_count)
+                self.camera_active_object_counts[camera][label] = new_active_count
+
+        if any_changed:
+            self.publish(f"{camera}/all", sum(list(all_objects.values())))
+            self.publish(f"{camera}/all/active", sum(list(active_objects.values())))
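The core trick in both update paths is the same: keep a `Counter` of the last published value per label and publish only on change. A stripped-down, runnable sketch of that pattern (camera name and labels are illustrative):

```python
from collections import Counter

# cache of the last published count per label
cached: Counter = Counter()

def diff_publish(new_counts: Counter, publish) -> None:
    """Publish only the labels whose count changed since the last update."""
    for label in set(new_counts) | set(cached):
        if new_counts[label] != cached[label]:
            publish(f"front_door/{label}", new_counts[label])
            cached[label] = new_counts[label]

diff_publish(Counter(person=2), print)  # -> front_door/person 2
diff_publish(Counter(person=2), print)  # unchanged: publishes nothing
diff_publish(Counter(), print)          # -> front_door/person 0
```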
@@ -7,6 +7,7 @@ from abc import ABC, abstractmethod
 from typing import Any, Callable, Optional

 from frigate.camera import PTZMetrics
+from frigate.camera.activity_manager import CameraActivityManager
 from frigate.comms.config_updater import ConfigPublisher
 from frigate.config import BirdseyeModeEnum, FrigateConfig
 from frigate.const import (
@@ -64,7 +65,7 @@ class Dispatcher:
         self.onvif = onvif
         self.ptz_metrics = ptz_metrics
         self.comms = communicators
-        self.camera_activity = {}
+        self.camera_activity = CameraActivityManager(config, self.publish)
         self.model_state = {}
         self.embeddings_reindex = {}

@@ -130,7 +131,7 @@ class Dispatcher:
             ).execute()

         def handle_update_camera_activity():
-            self.camera_activity = payload
+            self.camera_activity.update_activity(payload)

         def handle_update_event_description():
             event: Event = Event.get(Event.id == payload["id"])
@@ -171,7 +172,7 @@ class Dispatcher:
             )

         def handle_on_connect():
-            camera_status = self.camera_activity.copy()
+            camera_status = self.camera_activity.last_camera_activity.copy()

             for camera in camera_status.keys():
                 camera_status[camera]["config"] = {
@@ -167,7 +167,7 @@ class CameraConfig(FrigateBaseModel):
         record_args = get_ffmpeg_arg_list(
             parse_preset_output_record(
                 self.ffmpeg.output_args.record,
-                self.ffmpeg.output_args._force_record_hvc1,
+                self.ffmpeg.apple_compatibility,
             )
             or self.ffmpeg.output_args.record
         )
@@ -2,7 +2,7 @@ import shutil
 from enum import Enum
 from typing import Union

-from pydantic import Field, PrivateAttr, field_validator
+from pydantic import Field, field_validator

 from frigate.const import DEFAULT_FFMPEG_VERSION, INCLUDED_FFMPEG_VERSIONS

@@ -42,7 +42,6 @@ class FfmpegOutputArgsConfig(FrigateBaseModel):
         default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
         title="Record role FFmpeg output arguments.",
     )
-    _force_record_hvc1: bool = PrivateAttr(default=False)


 class FfmpegConfig(FrigateBaseModel):
@@ -64,6 +63,10 @@ class FfmpegConfig(FrigateBaseModel):
         default=10.0,
         title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
     )
+    apple_compatibility: bool = Field(
+        default=False,
+        title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
+    )

     @property
     def ffmpeg_path(self) -> str:
@@ -458,13 +458,12 @@ class FrigateConfig(FrigateBaseModel):
             camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args

         for input in camera_config.ffmpeg.inputs:
-            need_record_fourcc = False and "record" in input.roles
             need_detect_dimensions = "detect" in input.roles and (
                 camera_config.detect.height is None
                 or camera_config.detect.width is None
             )

-            if need_detect_dimensions or need_record_fourcc:
+            if need_detect_dimensions:
                 stream_info = {"width": 0, "height": 0, "fourcc": None}
                 try:
                     stream_info = stream_info_retriever.get_stream_info(
@@ -488,14 +487,6 @@ class FrigateConfig(FrigateBaseModel):
                     else DEFAULT_DETECT_DIMENSIONS["height"]
                 )

-            if need_record_fourcc:
-                # Apple only supports HEVC if it is hvc1 (vs. hev1)
-                camera_config.ffmpeg.output_args._force_record_hvc1 = (
-                    stream_info["fourcc"] == "hevc"
-                    if stream_info.get("hevc")
-                    else False
-                )
-
             # Warn if detect fps > 10
             if camera_config.detect.fps > 10:
                 logger.warning(
@@ -23,17 +23,23 @@ class SemanticSearchConfig(FrigateBaseModel):

 class FaceRecognitionConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable face recognition.")
+    min_score: float = Field(
+        title="Minimum face distance score required to save the attempt.",
+        default=0.8,
+        gt=0.0,
+        le=1.0,
+    )
     threshold: float = Field(
-        default=170,
-        title="minimum face distance score required to be considered a match.",
+        default=0.9,
+        title="Minimum face distance score required to be considered a match.",
         gt=0.0,
         le=1.0,
     )
     min_area: int = Field(
         default=500, title="Min area of face box to consider running face recognition."
     )
-    debug_save_images: bool = Field(
-        default=False, title="Save images of face detections for debugging."
+    save_attempts: bool = Field(
+        default=True, title="Save images of face detections for training."
     )

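Read together, the three score fields suggest a two-stage gate. The sketch below shows the assumed semantics only; the real decision logic lives in the embeddings maintainer further down and may differ:

```python
# Assumed gating implied by the field titles above: attempts that clear
# min_score are kept, but a sub label is only assigned at or above threshold.
def face_decision(score: float, min_score: float = 0.8, threshold: float = 0.9) -> str:
    if score < min_score:
        return "discard attempt"
    if score < threshold:
        return "save for training only"
    return "assign sub label"

print(face_decision(0.85))  # save for training only
```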
@@ -11,6 +11,9 @@ class StatsConfig(FrigateBaseModel):
     network_bandwidth: bool = Field(
         default=False, title="Enable network bandwidth for ffmpeg processes."
     )
+    sriov: bool = Field(
+        default=False, title="Treat device as SR-IOV to support GPU stats."
+    )


 class TelemetryConfig(FrigateBaseModel):
@@ -65,6 +65,7 @@ INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
 FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
 FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
 FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
+FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]

 # Regex constants
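The new constant pairs with the `apple_compatibility` flag introduced above. A rough sketch of the expected wiring; the actual mapping happens in `parse_preset_output_record`, which is not part of this diff:

```python
FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]

def with_apple_compatibility(record_args: list[str], enabled: bool) -> list[str]:
    # Apple only supports HEVC if it is tagged hvc1 (vs. hev1), per the
    # comment removed from config.py above.
    return record_args + FFMPEG_HVC1_ARGS if enabled else record_args
```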
@@ -108,7 +108,7 @@ class Rknn(DetectionApi):
             model_props["model_type"] = model_type

             if model_matched:
-                model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
+                model_props["filename"] = model_path + f"-{soc}-v2.3.0-1.rknn"

                 model_props["path"] = model_cache_dir + model_props["filename"]

@@ -129,7 +129,7 @@ class Rknn(DetectionApi):
             os.mkdir(model_cache_dir)

         urllib.request.urlretrieve(
-            f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
+            f"https://github.com/MarcA711/rknn-models/releases/download/v2.3.0/{filename}",
             model_cache_dir + filename,
         )
@@ -21,12 +21,13 @@ from frigate.util.builtin import serialize
 from frigate.util.services import listen

 from .maintainer import EmbeddingMaintainer
+from .types import EmbeddingsMetrics
 from .util import ZScoreNormalization

 logger = logging.getLogger(__name__)


-def manage_embeddings(config: FrigateConfig) -> None:
+def manage_embeddings(config: FrigateConfig, metrics: EmbeddingsMetrics) -> None:
     # Only initialize embeddings if semantic search is enabled
     if not config.semantic_search.enabled:
         return
@@ -60,6 +61,7 @@ def manage_embeddings(config: FrigateConfig) -> None:
     maintainer = EmbeddingMaintainer(
         db,
         config,
+        metrics,
         stop_event,
     )
     maintainer.start()
@@ -1,6 +1,7 @@
 """SQLite-vec embeddings database."""

 import base64
+import datetime
 import logging
 import os
 import time
@@ -21,6 +22,7 @@ from frigate.types import ModelStatusTypesEnum
 from frigate.util.builtin import serialize

 from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
+from .types import EmbeddingsMetrics

 logger = logging.getLogger(__name__)

@@ -59,9 +61,15 @@ def get_metadata(event: Event) -> dict:
 class Embeddings:
     """SQLite-vec embeddings database."""

-    def __init__(self, config: FrigateConfig, db: SqliteVecQueueDatabase) -> None:
+    def __init__(
+        self,
+        config: FrigateConfig,
+        db: SqliteVecQueueDatabase,
+        metrics: EmbeddingsMetrics,
+    ) -> None:
         self.config = config
         self.db = db
+        self.metrics = metrics
         self.requestor = InterProcessRequestor()

         # Create tables if they don't exist
@@ -123,19 +131,6 @@ class Embeddings:
             device="GPU" if config.semantic_search.model_size == "large" else "CPU",
         )

-        if self.config.face_recognition.enabled:
-            self.face_embedding = GenericONNXEmbedding(
-                model_name="facedet",
-                model_file="facedet.onnx",
-                download_urls={
-                    "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
-                    "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
-                },
-                model_size="small",
-                model_type=ModelTypeEnum.face,
-                requestor=self.requestor,
-            )
-
         self.lpr_detection_model = None
         self.lpr_classification_model = None
         self.lpr_recognition_model = None
@@ -186,6 +181,7 @@ class Embeddings:
         @param: thumbnail bytes in jpg format
         @param: upsert If embedding should be upserted into vec DB
         """
+        start = datetime.datetime.now().timestamp()
         # Convert thumbnail bytes to PIL Image
         embedding = self.vision_embedding([thumbnail])[0]

@@ -198,6 +194,11 @@ class Embeddings:
             (event_id, serialize(embedding)),
         )

+        duration = datetime.datetime.now().timestamp() - start
+        self.metrics.image_embeddings_fps.value = (
+            self.metrics.image_embeddings_fps.value * 9 + duration
+        ) / 10
+
         return embedding

     def batch_embed_thumbnail(
@@ -208,6 +209,7 @@ class Embeddings:
         @param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format
         @param: upsert If embedding should be upserted into vec DB
         """
+        start = datetime.datetime.now().timestamp()
         ids = list(event_thumbs.keys())
         embeddings = self.vision_embedding(list(event_thumbs.values()))

@@ -226,11 +228,17 @@ class Embeddings:
             items,
         )

+        duration = datetime.datetime.now().timestamp() - start
+        self.metrics.text_embeddings_sps.value = (
+            self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
+        ) / 10
+
         return embeddings

     def embed_description(
         self, event_id: str, description: str, upsert: bool = True
     ) -> ndarray:
+        start = datetime.datetime.now().timestamp()
         embedding = self.text_embedding([description])[0]

         if upsert:
@@ -242,11 +250,17 @@ class Embeddings:
             (event_id, serialize(embedding)),
         )

+        duration = datetime.datetime.now().timestamp() - start
+        self.metrics.text_embeddings_sps.value = (
+            self.metrics.text_embeddings_sps.value * 9 + duration
+        ) / 10
+
         return embedding

     def batch_embed_description(
         self, event_descriptions: dict[str, str], upsert: bool = True
     ) -> ndarray:
+        start = datetime.datetime.now().timestamp()
         # upsert embeddings one by one to avoid token limit
         embeddings = []

@@ -269,6 +283,11 @@ class Embeddings:
             items,
         )

+        duration = datetime.datetime.now().timestamp() - start
+        self.metrics.text_embeddings_sps.value = (
+            self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
+        ) / 10
+
         return embeddings

     def reindex(self) -> None:
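All of the timing updates above (and the face and plate timings below) use the same smoothing rule: one tenth of each new sample blends into the running value. As a standalone sketch:

```python
def update_ema(current: float, sample: float) -> float:
    """Nine parts old value, one part new sample."""
    return (current * 9 + sample) / 10

value = 0.0
for sample in (0.12, 0.10, 0.11):
    value = update_ema(value, sample)
print(round(value, 4))  # 0.0297: the average warms up gradually from zero
```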
@@ -1,6 +1,7 @@
 """Maintain embeddings in SQLite-vec."""

 import base64
+import datetime
 import logging
 import os
 import random
@@ -41,6 +42,7 @@ from frigate.util.image import SharedMemoryFrameManager, area, calculate_region
 from frigate.util.model import FaceClassificationModel

 from .embeddings import Embeddings
+from .types import EmbeddingsMetrics

 logger = logging.getLogger(__name__)

@@ -54,11 +56,13 @@ class EmbeddingMaintainer(threading.Thread):
         self,
         db: SqliteQueueDatabase,
         config: FrigateConfig,
+        metrics: EmbeddingsMetrics,
         stop_event: MpEvent,
     ) -> None:
         super().__init__(name="embeddings_maintainer")
         self.config = config
-        self.embeddings = Embeddings(config, db)
+        self.metrics = metrics
+        self.embeddings = Embeddings(config, db, metrics)

         # Check if we need to re-index events
         if config.semantic_search.reindex:
@@ -100,19 +104,6 @@ class EmbeddingMaintainer(threading.Thread):
             self.lpr_config, self.requestor, self.embeddings
         )

-    @property
-    def face_detector(self) -> cv2.FaceDetectorYN:
-        # Lazily create the classifier.
-        if "face_detector" not in self.__dict__:
-            self.__dict__["face_detector"] = cv2.FaceDetectorYN.create(
-                "/config/model_cache/facedet/facedet.onnx",
-                config="",
-                input_size=(320, 320),
-                score_threshold=0.8,
-                nms_threshold=0.3,
-            )
-        return self.__dict__["face_detector"]
-
     def run(self) -> None:
         """Maintain a SQLite-vec database for semantic search."""
         while not self.stop_event.is_set():
@@ -148,7 +139,8 @@ class EmbeddingMaintainer(threading.Thread):
             )
         elif topic == EmbeddingsRequestEnum.generate_search.value:
             return serialize(
-                self.embeddings.text_embedding([data])[0], pack=False
+                self.embeddings.embed_description("", data, upsert=False),
+                pack=False,
             )
         elif topic == EmbeddingsRequestEnum.register_face.value:
             if not self.face_recognition_enabled:
@@ -232,10 +224,24 @@ class EmbeddingMaintainer(threading.Thread):
             return

         if self.face_recognition_enabled:
-            self._process_face(data, yuv_frame)
+            start = datetime.datetime.now().timestamp()
+            processed = self._process_face(data, yuv_frame)
+
+            if processed:
+                duration = datetime.datetime.now().timestamp() - start
+                self.metrics.face_rec_fps.value = (
+                    self.metrics.face_rec_fps.value * 9 + duration
+                ) / 10

         if self.lpr_config.enabled:
-            self._process_license_plate(data, yuv_frame)
+            start = datetime.datetime.now().timestamp()
+            processed = self._process_license_plate(data, yuv_frame)
+
+            if processed:
+                duration = datetime.datetime.now().timestamp() - start
+                self.metrics.alpr_pps.value = (
+                    self.metrics.alpr_pps.value * 9 + duration
+                ) / 10

         # no need to save our own thumbnails if genai is not enabled
         # or if the object has become stationary
@@ -395,10 +401,9 @@ class EmbeddingMaintainer(threading.Thread):

     def _detect_face(self, input: np.ndarray) -> tuple[int, int, int, int]:
         """Detect faces in input image."""
-        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
-        faces = self.face_detector.detect(input)
+        faces = self.face_classifier.detect_faces(input)

-        if faces[1] is None:
+        if faces is None or faces[1] is None:
             return None

         face = None
@@ -416,14 +421,14 @@ class EmbeddingMaintainer(threading.Thread):

         return face

-    def _process_face(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
+    def _process_face(self, obj_data: dict[str, any], frame: np.ndarray) -> bool:
         """Look for faces in image."""
         id = obj_data["id"]

         # don't run for non person objects
         if obj_data.get("label") != "person":
             logger.debug("Not processing face for non person object.")
-            return
+            return False

         # don't overwrite sub label for objects that have a sub label
         # that is not a face
@@ -431,7 +436,7 @@ class EmbeddingMaintainer(threading.Thread):
             logger.debug(
                 f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
             )
-            return
+            return False

         face: Optional[dict[str, any]] = None

@@ -440,7 +445,7 @@ class EmbeddingMaintainer(threading.Thread):
             person_box = obj_data.get("box")

             if not person_box:
-                return None
+                return False

             rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
             left, top, right, bottom = person_box
@@ -449,7 +454,7 @@ class EmbeddingMaintainer(threading.Thread):

             if not face_box:
                 logger.debug("Detected no faces for person object.")
-                return
+                return False

             margin = int((face_box[2] - face_box[0]) * 0.25)
             face_frame = person[
@@ -465,7 +470,7 @@ class EmbeddingMaintainer(threading.Thread):
             # don't run for object without attributes
             if not obj_data.get("current_attributes"):
                 logger.debug("No attributes to parse.")
-                return
+                return False

             attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
             for attr in attributes:
@@ -477,14 +482,14 @@ class EmbeddingMaintainer(threading.Thread):

         # no faces detected in this frame
         if not face:
-            return
+            return False

         face_box = face.get("box")

         # check that face is valid
         if not face_box or area(face_box) < self.config.face_recognition.min_area:
             logger.debug(f"Invalid face box {face}")
-            return
+            return False

         face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
         margin = int((face_box[2] - face_box[0]) * 0.25)
@@ -501,7 +506,7 @@ class EmbeddingMaintainer(threading.Thread):
         res = self.face_classifier.classify_face(face_frame)

         if not res:
-            return
+            return False

         sub_label, score = res

@@ -515,18 +520,24 @@ class EmbeddingMaintainer(threading.Thread):
             f"Detected best face for person as: {sub_label} with probability {score} and overall face score {face_score}"
         )

-        if self.config.face_recognition.debug_save_images:
+        if self.config.face_recognition.save_attempts:
             # write face to library
-            folder = os.path.join(FACE_DIR, "debug")
+            folder = os.path.join(FACE_DIR, "train")
             file = os.path.join(folder, f"{id}-{sub_label}-{score}-{face_score}.webp")
             os.makedirs(folder, exist_ok=True)
             cv2.imwrite(file, face_frame)

+        if score < self.config.face_recognition.threshold:
+            logger.debug(
+                f"Recognized face distance {score} is less than threshold {self.config.face_recognition.threshold}"
+            )
+            return True
+
         if id in self.detected_faces and face_score <= self.detected_faces[id]:
             logger.debug(
                 f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})."
             )
-            return
+            return True

         resp = requests.post(
             f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
@@ -540,6 +551,8 @@ class EmbeddingMaintainer(threading.Thread):
         if resp.status_code == 200:
             self.detected_faces[id] = face_score

+        return True
+
     def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
         """Return the dimensions of the input image as [x, y, width, height]."""
         height, width = input.shape[:2]
@@ -547,19 +560,19 @@ class EmbeddingMaintainer(threading.Thread):

     def _process_license_plate(
         self, obj_data: dict[str, any], frame: np.ndarray
-    ) -> None:
+    ) -> bool:
         """Look for license plates in image."""
         id = obj_data["id"]

         # don't run for non car objects
         if obj_data.get("label") != "car":
             logger.debug("Not processing license plate for non car object.")
-            return
+            return False

         # don't run for stationary car objects
         if obj_data.get("stationary") == True:
             logger.debug("Not processing license plate for a stationary car object.")
|
logger.debug("Not a processing license plate for a stationary car object.")
|
||||||
return
|
return False
|
||||||
|
|
||||||
# don't overwrite sub label for objects that have a sub label
|
# don't overwrite sub label for objects that have a sub label
|
||||||
# that is not a license plate
|
# that is not a license plate
|
||||||
@ -567,7 +580,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
logger.debug(
|
logger.debug(
|
||||||
f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
|
f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
|
||||||
)
|
)
|
||||||
return
|
return False
|
||||||
|
|
||||||
license_plate: Optional[dict[str, any]] = None
|
license_plate: Optional[dict[str, any]] = None
|
||||||
|
|
||||||
@ -576,7 +589,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
car_box = obj_data.get("box")
|
car_box = obj_data.get("box")
|
||||||
|
|
||||||
if not car_box:
|
if not car_box:
|
||||||
return None
|
return False
|
||||||
|
|
||||||
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
|
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
|
||||||
left, top, right, bottom = car_box
|
left, top, right, bottom = car_box
|
||||||
@ -585,7 +598,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
|
|
||||||
if not license_plate:
|
if not license_plate:
|
||||||
logger.debug("Detected no license plates for car object.")
|
logger.debug("Detected no license plates for car object.")
|
||||||
return
|
return False
|
||||||
|
|
||||||
license_plate_frame = car[
|
license_plate_frame = car[
|
||||||
license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
|
license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
|
||||||
@ -595,7 +608,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
# don't run for object without attributes
|
# don't run for object without attributes
|
||||||
if not obj_data.get("current_attributes"):
|
if not obj_data.get("current_attributes"):
|
||||||
logger.debug("No attributes to parse.")
|
logger.debug("No attributes to parse.")
|
||||||
return
|
return False
|
||||||
|
|
||||||
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
|
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
|
||||||
for attr in attributes:
|
for attr in attributes:
|
||||||
@ -609,7 +622,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
|
|
||||||
# no license plates detected in this frame
|
# no license plates detected in this frame
|
||||||
if not license_plate:
|
if not license_plate:
|
||||||
return
|
return False
|
||||||
|
|
||||||
license_plate_box = license_plate.get("box")
|
license_plate_box = license_plate.get("box")
|
||||||
|
|
||||||
@ -619,7 +632,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
or area(license_plate_box) < self.config.lpr.min_area
|
or area(license_plate_box) < self.config.lpr.min_area
|
||||||
):
|
):
|
||||||
logger.debug(f"Invalid license plate box {license_plate}")
|
logger.debug(f"Invalid license plate box {license_plate}")
|
||||||
return
|
return False
|
||||||
|
|
||||||
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
|
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
|
||||||
license_plate_frame = license_plate_frame[
|
license_plate_frame = license_plate_frame[
|
||||||
@ -648,7 +661,7 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
else:
|
else:
|
||||||
# no plates found
|
# no plates found
|
||||||
logger.debug("No text detected")
|
logger.debug("No text detected")
|
||||||
return
|
return True
|
||||||
|
|
||||||
top_plate, top_char_confidences, top_area = (
|
top_plate, top_char_confidences, top_area = (
|
||||||
license_plates[0],
|
license_plates[0],
|
||||||
@ -694,14 +707,14 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} "
|
f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} "
|
||||||
f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}"
|
f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}"
|
||||||
)
|
)
|
||||||
return
|
return True
|
||||||
|
|
||||||
# Check against minimum confidence threshold
|
# Check against minimum confidence threshold
|
||||||
if avg_confidence < self.lpr_config.threshold:
|
if avg_confidence < self.lpr_config.threshold:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})"
|
f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})"
|
||||||
)
|
)
|
||||||
return
|
return True
|
||||||
|
|
||||||
# Determine subLabel based on known plates, use regex matching
|
# Determine subLabel based on known plates, use regex matching
|
||||||
# Default to the detected plate, use label name if there's a match
|
# Default to the detected plate, use label name if there's a match
|
||||||
@ -731,6 +744,8 @@ class EmbeddingMaintainer(threading.Thread):
|
|||||||
"area": top_area,
|
"area": top_area,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
|
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
|
||||||
"""Return jpg thumbnail of a region of the frame."""
|
"""Return jpg thumbnail of a region of the frame."""
|
||||||
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
|
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
|
||||||
|
|||||||
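Note on the refactor above: `_process_face` and `_process_license_plate` now return `bool` instead of `None` — `False` when there was nothing to process, `True` once the processor actually ran. Plausibly this lets the caller time only runs that did real work when filling the new embeddings metrics introduced below; a minimal self-contained sketch of that pattern, assuming nothing beyond the bool contract (the helper name is illustrative, not code from this PR):

    import time

    def timed_run(processor, *args) -> float | None:
        """Run a processor that returns bool; report elapsed seconds only
        when it actually did work (returned True), else None."""
        start = time.monotonic()
        did_work = processor(*args)
        return time.monotonic() - start if did_work else None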
frigate/embeddings/types.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+"""Embeddings types."""
+
+import multiprocessing as mp
+from multiprocessing.sharedctypes import Synchronized
+
+
+class EmbeddingsMetrics:
+    image_embeddings_fps: Synchronized
+    text_embeddings_sps: Synchronized
+    face_rec_fps: Synchronized
+    alpr_pps: Synchronized
+
+    def __init__(self):
+        self.image_embeddings_fps = mp.Value("d", 0.01)
+        self.text_embeddings_sps = mp.Value("d", 0.01)
+        self.face_rec_fps = mp.Value("d", 0.01)
+        self.alpr_pps = mp.Value("d", 0.01)
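The `mp.Value("d", 0.01)` fields above are process-shared doubles, so the embeddings process can write speeds that the stats process reads without any queue. A minimal standalone sketch of that pattern (the worker function and values here are illustrative, not Frigate code):

    import multiprocessing as mp
    from multiprocessing.sharedctypes import Synchronized

    def worker(fps: Synchronized) -> None:
        # Synchronized wraps a ctypes double with a lock guarding .value
        with fps.get_lock():
            fps.value = 0.042  # seconds per inference, illustrative only

    if __name__ == "__main__":
        face_rec_fps = mp.Value("d", 0.01)  # "d" = C double, matching the class above
        p = mp.Process(target=worker, args=(face_rec_fps,))
        p.start()
        p.join()
        # stats_snapshot later multiplies by 1000 to report milliseconds
        print(round(face_rec_fps.value * 1000, 2))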
@@ -6,6 +6,7 @@ from enum import Enum
 from typing import Any

 from frigate.const import (
+    FFMPEG_HVC1_ARGS,
     FFMPEG_HWACCEL_NVIDIA,
     FFMPEG_HWACCEL_VAAPI,
     FFMPEG_HWACCEL_VULKAN,
@@ -497,6 +498,6 @@ def parse_preset_output_record(arg: Any, force_record_hvc1: bool) -> list[str]:

     if force_record_hvc1:
         # Apple only supports HEVC if it is hvc1 (vs. hev1)
-        preset += ["-tag:v", "hvc1"]
+        preset += FFMPEG_HVC1_ARGS

     return preset
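For context: the inlined `["-tag:v", "hvc1"]` moves into a shared constant so the record preset and the exporter (further down in this diff) stay in sync. The definition itself is not shown in this diff, but given the replaced line it is presumably along these lines in `frigate/const.py`:

    # Assumed definition (not shown in this diff): Apple players require the
    # hvc1 sample-entry tag for HEVC rather than ffmpeg's default hev1.
    FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]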
@@ -1,5 +1,5 @@
 [mypy]
-python_version = 3.9
+python_version = 3.11
 show_error_codes = true
 follow_imports = normal
 ignore_missing_imports = true
@@ -4,7 +4,7 @@ import logging
 import os
 import queue
 import threading
-from collections import Counter, defaultdict
+from collections import defaultdict
 from multiprocessing.synchronize import Event as MpEvent
 from typing import Callable, Optional

@@ -51,8 +51,6 @@ class CameraState:
         self.camera_config = config.cameras[name]
         self.frame_manager = frame_manager
         self.best_objects: dict[str, TrackedObject] = {}
-        self.object_counts = defaultdict(int)
-        self.active_object_counts = defaultdict(int)
         self.tracked_objects: dict[str, TrackedObject] = {}
         self.frame_cache = {}
         self.zone_objects = defaultdict(list)
@@ -338,6 +336,7 @@ class CameraState:
                     "ratio": obj.obj_data["ratio"],
                     "score": obj.obj_data["score"],
                     "sub_label": sub_label,
+                    "current_zones": obj.current_zones,
                 }
             )

@@ -377,78 +376,6 @@ class CameraState:
         for c in self.callbacks["camera_activity"]:
             c(self.name, camera_activity)

-        # update overall camera state for each object type
-        obj_counter = Counter(
-            obj.obj_data["label"]
-            for obj in tracked_objects.values()
-            if not obj.false_positive
-        )
-
-        active_obj_counter = Counter(
-            obj.obj_data["label"]
-            for obj in tracked_objects.values()
-            if not obj.false_positive and obj.active
-        )
-
-        # keep track of all labels detected for this camera
-        total_label_count = 0
-        total_active_label_count = 0
-
-        # report on all detected objects
-        for obj_name, count in obj_counter.items():
-            total_label_count += count
-
-            if count != self.object_counts[obj_name]:
-                self.object_counts[obj_name] = count
-                for c in self.callbacks["object_status"]:
-                    c(self.name, obj_name, count)
-
-        # update the active count on all detected objects
-        # To ensure we emit 0's if all objects are stationary, we need to loop
-        # over the set of all objects, not just active ones.
-        for obj_name in set(obj_counter):
-            count = active_obj_counter[obj_name]
-            total_active_label_count += count
-
-            if count != self.active_object_counts[obj_name]:
-                self.active_object_counts[obj_name] = count
-                for c in self.callbacks["active_object_status"]:
-                    c(self.name, obj_name, count)
-
-        # publish for all labels detected for this camera
-        if total_label_count != self.object_counts.get("all"):
-            self.object_counts["all"] = total_label_count
-            for c in self.callbacks["object_status"]:
-                c(self.name, "all", total_label_count)
-
-        # publish active label counts for this camera
-        if total_active_label_count != self.active_object_counts.get("all"):
-            self.active_object_counts["all"] = total_active_label_count
-            for c in self.callbacks["active_object_status"]:
-                c(self.name, "all", total_active_label_count)
-
-        # expire any objects that are >0 and no longer detected
-        expired_objects = [
-            obj_name
-            for obj_name, count in self.object_counts.items()
-            if count > 0 and obj_name not in obj_counter
-        ]
-        for obj_name in expired_objects:
-            # Ignore the artificial all label
-            if obj_name == "all":
-                continue
-
-            self.object_counts[obj_name] = 0
-            for c in self.callbacks["object_status"]:
-                c(self.name, obj_name, 0)
-            # Only publish if the object was previously active.
-            if self.active_object_counts[obj_name] > 0:
-                for c in self.callbacks["active_object_status"]:
-                    c(self.name, obj_name, 0)
-                self.active_object_counts[obj_name] = 0
-            for c in self.callbacks["snapshot"]:
-                c(self.name, self.best_objects[obj_name], frame_name)
-
         # cleanup thumbnail frame cache
         current_thumb_frames = {
             obj.thumbnail_data["frame_time"]
@@ -635,14 +562,6 @@ class TrackedObjectProcessor(threading.Thread):
                 retain=True,
             )

-        def object_status(camera, object_name, status):
-            self.dispatcher.publish(f"{camera}/{object_name}", status, retain=False)
-
-        def active_object_status(camera, object_name, status):
-            self.dispatcher.publish(
-                f"{camera}/{object_name}/active", status, retain=False
-            )
-
         def camera_activity(camera, activity):
             last_activity = self.camera_activity.get(camera)

@@ -659,8 +578,6 @@ class TrackedObjectProcessor(threading.Thread):
             camera_state.on("update", update)
             camera_state.on("end", end)
             camera_state.on("snapshot", snapshot)
-            camera_state.on("object_status", object_status)
-            camera_state.on("active_object_status", active_object_status)
             camera_state.on("camera_activity", camera_activity)
             self.camera_states[camera] = camera_state

@@ -817,124 +734,6 @@ class TrackedObjectProcessor(threading.Thread):
                     )
                 )

-            # update zone counts for each label
-            # for each zone in the current camera
-            for zone in self.config.cameras[camera].zones.keys():
-                # count labels for the camera in the zone
-                obj_counter = Counter(
-                    obj.obj_data["label"]
-                    for obj in camera_state.tracked_objects.values()
-                    if zone in obj.current_zones and not obj.false_positive
-                )
-                active_obj_counter = Counter(
-                    obj.obj_data["label"]
-                    for obj in camera_state.tracked_objects.values()
-                    if (
-                        zone in obj.current_zones
-                        and not obj.false_positive
-                        and obj.active
-                    )
-                )
-                total_label_count = 0
-                total_active_label_count = 0
-
-                # update counts and publish status
-                for label in set(self.zone_data[zone].keys()) | set(obj_counter.keys()):
-                    # Ignore the artificial all label
-                    if label == "all":
-                        continue
-
-                    # if we have previously published a count for this zone/label
-                    zone_label = self.zone_data[zone][label]
-                    active_zone_label = self.active_zone_data[zone][label]
-                    if camera in zone_label:
-                        current_count = sum(zone_label.values())
-                        current_active_count = sum(active_zone_label.values())
-                        zone_label[camera] = (
-                            obj_counter[label] if label in obj_counter else 0
-                        )
-                        active_zone_label[camera] = (
-                            active_obj_counter[label]
-                            if label in active_obj_counter
-                            else 0
-                        )
-                        new_count = sum(zone_label.values())
-                        new_active_count = sum(active_zone_label.values())
-                        if new_count != current_count:
-                            self.dispatcher.publish(
-                                f"{zone}/{label}",
-                                new_count,
-                                retain=False,
-                            )
-                        if new_active_count != current_active_count:
-                            self.dispatcher.publish(
-                                f"{zone}/{label}/active",
-                                new_active_count,
-                                retain=False,
-                            )
-
-                        # Set the count for the /zone/all topic.
-                        total_label_count += new_count
-                        total_active_label_count += new_active_count
-
-                    # if this is a new zone/label combo for this camera
-                    else:
-                        if label in obj_counter:
-                            zone_label[camera] = obj_counter[label]
-                            active_zone_label[camera] = active_obj_counter[label]
-                            self.dispatcher.publish(
-                                f"{zone}/{label}",
-                                obj_counter[label],
-                                retain=False,
-                            )
-                            self.dispatcher.publish(
-                                f"{zone}/{label}/active",
-                                active_obj_counter[label],
-                                retain=False,
-                            )
-
-                            # Set the count for the /zone/all topic.
-                            total_label_count += obj_counter[label]
-                            total_active_label_count += active_obj_counter[label]
-
-                # if we have previously published a count for this zone all labels
-                zone_label = self.zone_data[zone]["all"]
-                active_zone_label = self.active_zone_data[zone]["all"]
-                if camera in zone_label:
-                    current_count = sum(zone_label.values())
-                    current_active_count = sum(active_zone_label.values())
-                    zone_label[camera] = total_label_count
-                    active_zone_label[camera] = total_active_label_count
-                    new_count = sum(zone_label.values())
-                    new_active_count = sum(active_zone_label.values())
-
-                    if new_count != current_count:
-                        self.dispatcher.publish(
-                            f"{zone}/all",
-                            new_count,
-                            retain=False,
-                        )
-                    if new_active_count != current_active_count:
-                        self.dispatcher.publish(
-                            f"{zone}/all/active",
-                            new_active_count,
-                            retain=False,
-                        )
-                # if this is a new zone all label for this camera
-                else:
-                    zone_label[camera] = total_label_count
-                    active_zone_label[camera] = total_active_label_count
-                    self.dispatcher.publish(
-                        f"{zone}/all",
-                        total_label_count,
-                        retain=False,
-                    )
-                    self.dispatcher.publish(
-                        f"{zone}/all/active",
-                        total_active_label_count,
-                        retain=False,
-                    )
-
             # cleanup event finished queue
             while not self.stop_event.is_set():
                 update = self.event_end_subscriber.check_for_update(timeout=0.01)
@@ -19,6 +19,7 @@ from frigate.const import (
     CACHE_DIR,
     CLIPS_DIR,
     EXPORT_DIR,
+    FFMPEG_HVC1_ARGS,
     MAX_PLAYLIST_SECONDS,
     PREVIEW_FRAME_TYPE,
 )
@@ -219,7 +220,7 @@ class RecordingExporter(threading.Thread):

         if self.playback_factor == PlaybackFactorEnum.realtime:
             ffmpeg_cmd = (
-                f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart {video_path}"
+                f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart"
             ).split(" ")
         elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
             ffmpeg_cmd = (
@@ -227,11 +228,16 @@ class RecordingExporter(threading.Thread):
                 self.config.ffmpeg.ffmpeg_path,
                 self.config.ffmpeg.hwaccel_args,
                 f"-an {ffmpeg_input}",
-                f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",
+                f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart",
                 EncodeTypeEnum.timelapse,
             )
         ).split(" ")

+        if self.config.ffmpeg.apple_compatibility:
+            ffmpeg_cmd += FFMPEG_HVC1_ARGS
+
+        ffmpeg_cmd.append(video_path)
+
         return ffmpeg_cmd, playlist_lines

     def get_preview_export_command(self, video_path: str) -> list[str]:
@@ -26,7 +26,7 @@ class Service(ABC):
         self.__dict__["name"] = name

         self.__manager = manager or ServiceManager.current()
-        self.__lock = asyncio.Lock(loop=self.__manager._event_loop)
+        self.__lock = asyncio.Lock(loop=self.__manager._event_loop)  # type: ignore[call-arg]
         self.__manager._register(self)

     @property
@@ -14,6 +14,7 @@ from requests.exceptions import RequestException
 from frigate.camera import CameraMetrics
 from frigate.config import FrigateConfig
 from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
+from frigate.embeddings.types import EmbeddingsMetrics
 from frigate.object_detection import ObjectDetectProcess
 from frigate.types import StatsTrackingTypes
 from frigate.util.services import (
@@ -51,11 +52,13 @@ def get_latest_version(config: FrigateConfig) -> str:
 def stats_init(
     config: FrigateConfig,
     camera_metrics: dict[str, CameraMetrics],
+    embeddings_metrics: EmbeddingsMetrics | None,
     detectors: dict[str, ObjectDetectProcess],
     processes: dict[str, int],
 ) -> StatsTrackingTypes:
     stats_tracking: StatsTrackingTypes = {
         "camera_metrics": camera_metrics,
+        "embeddings_metrics": embeddings_metrics,
         "detectors": detectors,
         "started": int(time.time()),
         "latest_frigate_version": get_latest_version(config),
@@ -195,7 +198,7 @@ async def set_gpu_stats(
             continue

         # intel QSV GPU
-        intel_usage = get_intel_gpu_stats()
+        intel_usage = get_intel_gpu_stats(config.telemetry.stats.sriov)

         if intel_usage is not None:
             stats["intel-qsv"] = intel_usage or {"gpu": "", "mem": ""}
@@ -220,7 +223,7 @@ async def set_gpu_stats(
             continue

         # intel VAAPI GPU
-        intel_usage = get_intel_gpu_stats()
+        intel_usage = get_intel_gpu_stats(config.telemetry.stats.sriov)

         if intel_usage is not None:
             stats["intel-vaapi"] = intel_usage or {"gpu": "", "mem": ""}
@@ -279,6 +282,27 @@ def stats_snapshot(
     }
     stats["detection_fps"] = round(total_detection_fps, 2)

+    if config.semantic_search.enabled:
+        embeddings_metrics = stats_tracking["embeddings_metrics"]
+        stats["embeddings"] = {
+            "image_embedding_speed": round(
+                embeddings_metrics.image_embeddings_fps.value * 1000, 2
+            ),
+            "text_embedding_speed": round(
+                embeddings_metrics.text_embeddings_sps.value * 1000, 2
+            ),
+        }
+
+        if config.face_recognition.enabled:
+            stats["embeddings"]["face_recognition_speed"] = round(
+                embeddings_metrics.face_rec_fps.value * 1000, 2
+            )
+
+        if config.lpr.enabled:
+            stats["embeddings"]["plate_recognition_speed"] = round(
+                embeddings_metrics.alpr_pps.value * 1000, 2
+            )
+
     get_processing_stats(config, stats, hwaccel_errors)

     stats["service"] = {
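The new `stats["embeddings"]` section reports per-feature inference speeds in milliseconds; the shared metrics store seconds, hence the `* 1000` before rounding. For orientation, the resulting snapshot fragment would look roughly like this — key names come from the diff, the numbers are invented:

    # Illustrative shape only; values are made up.
    {
        "embeddings": {
            "image_embedding_speed": 47.12,   # ms, semantic search
            "text_embedding_speed": 11.83,    # ms, semantic search
            "face_recognition_speed": 5.4,    # ms, only if face_recognition.enabled
            "plate_recognition_speed": 38.9,  # ms, only if lpr.enabled
        }
    }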
@@ -38,7 +38,7 @@ class TestGpuStats(unittest.TestCase):
         process.returncode = 124
         process.stdout = self.intel_results
         sp.return_value = process
-        intel_stats = get_intel_gpu_stats()
+        intel_stats = get_intel_gpu_stats(False)
         print(f"the intel stats are {intel_stats}")
         assert intel_stats == {
             "gpu": "1.13%",
@@ -2,11 +2,13 @@ from enum import Enum
 from typing import TypedDict

 from frigate.camera import CameraMetrics
+from frigate.embeddings.types import EmbeddingsMetrics
 from frigate.object_detection import ObjectDetectProcess


 class StatsTrackingTypes(TypedDict):
     camera_metrics: dict[str, CameraMetrics]
+    embeddings_metrics: EmbeddingsMetrics | None
     detectors: dict[str, ObjectDetectProcess]
     started: int
     latest_frigate_version: str
@@ -51,12 +51,14 @@ class ModelDownloader:
         download_path: str,
         file_names: List[str],
         download_func: Callable[[str], None],
+        complete_func: Callable[[], None] | None = None,
         silent: bool = False,
     ):
         self.model_name = model_name
         self.download_path = download_path
         self.file_names = file_names
         self.download_func = download_func
+        self.complete_func = complete_func
         self.silent = silent
         self.requestor = InterProcessRequestor()
         self.download_thread = None
@@ -97,6 +99,9 @@ class ModelDownloader:
             },
         )

+        if self.complete_func:
+            self.complete_func()
+
         self.requestor.stop()
         self.download_complete.set()
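The new optional `complete_func` runs once after all model files have been downloaded, giving callers a hook to finish initialization (the face model below uses it to build its detector only once the ONNX file exists). A hedged usage sketch — the path, file names, and lambdas here are placeholders, not code from this PR:

    downloader = ModelDownloader(
        model_name="facedet",
        download_path="/config/model_cache/facedet",        # placeholder path
        file_names=["facedet.onnx"],
        download_func=lambda path: print(f"fetch {path}"),  # real callers download here
        complete_func=lambda: print("all files present"),   # new hook from this diff
    )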
@@ -2,7 +2,7 @@

 import logging
 import os
-from typing import Any, Optional
+from typing import Any

 import cv2
 import numpy as np
@@ -10,6 +10,7 @@ import onnxruntime as ort
 from playhouse.sqliteq import SqliteQueueDatabase

 from frigate.config.semantic_search import FaceRecognitionConfig
+from frigate.const import MODEL_CACHE_DIR

 try:
     import openvino as ov
@@ -162,34 +163,92 @@ class FaceClassificationModel:
     def __init__(self, config: FaceRecognitionConfig, db: SqliteQueueDatabase):
         self.config = config
         self.db = db
-        self.landmark_detector = cv2.face.createFacemarkLBF()
-        self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
-        self.recognizer: cv2.face.LBPHFaceRecognizer = (
-            cv2.face.LBPHFaceRecognizer_create(
-                radius=2, threshold=(1 - config.threshold) * 1000
-            )
-        )
+        self.face_detector: cv2.FaceDetectorYN = None
+        self.landmark_detector: cv2.face.FacemarkLBF = None
+        self.face_recognizer: cv2.face.LBPHFaceRecognizer = None
+
+        download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
+        self.model_files = {
+            "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
+            "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
+        }
+
+        if not all(
+            os.path.exists(os.path.join(download_path, n))
+            for n in self.model_files.keys()
+        ):
+            # conditionally import ModelDownloader
+            from frigate.util.downloader import ModelDownloader
+
+            self.downloader = ModelDownloader(
+                model_name="facedet",
+                download_path=download_path,
+                file_names=self.model_files.keys(),
+                download_func=self.__download_models,
+                complete_func=self.__build_detector,
+            )
+            self.downloader.ensure_model_files()
+        else:
+            self.__build_detector()

         self.label_map: dict[int, str] = {}
         self.__build_classifier()

+    def __download_models(self, path: str) -> None:
+        try:
+            file_name = os.path.basename(path)
+            # conditionally import ModelDownloader
+            from frigate.util.downloader import ModelDownloader
+
+            ModelDownloader.download_from_url(self.model_files[file_name], path)
+        except Exception as e:
+            logger.error(f"Failed to download {path}: {e}")
+
+    def __build_detector(self) -> None:
+        self.face_detector = cv2.FaceDetectorYN.create(
+            "/config/model_cache/facedet/facedet.onnx",
+            config="",
+            input_size=(320, 320),
+            score_threshold=0.8,
+            nms_threshold=0.3,
+        )
+        self.landmark_detector = cv2.face.createFacemarkLBF()
+        self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
+
     def __build_classifier(self) -> None:
+        if not self.landmark_detector:
+            return None
+
         labels = []
         faces = []

         dir = "/media/frigate/clips/faces"
         for idx, name in enumerate(os.listdir(dir)):
-            if name == "debug":
+            if name == "train":
+                continue
+
+            face_folder = os.path.join(dir, name)
+
+            if not os.path.isdir(face_folder):
                 continue

             self.label_map[idx] = name
-            face_folder = os.path.join(dir, name)
             for image in os.listdir(face_folder):
                 img = cv2.imread(os.path.join(face_folder, image))
+
+                if img is None:
+                    continue
+
                 img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                 img = self.__align_face(img, img.shape[1], img.shape[0])
                 faces.append(img)
                 labels.append(idx)

+        self.recognizer: cv2.face.LBPHFaceRecognizer = (
+            cv2.face.LBPHFaceRecognizer_create(
+                radius=2, threshold=(1 - self.config.min_score) * 1000
+            )
+        )
         self.recognizer.train(faces, np.array(labels))

     def __align_face(
@@ -254,7 +313,17 @@ class FaceClassificationModel:
         self.labeler = None
         self.label_map = {}

-    def classify_face(self, face_image: np.ndarray) -> Optional[tuple[str, float]]:
+    def detect_faces(self, input: np.ndarray) -> tuple[int, cv2.typing.MatLike] | None:
+        if not self.face_detector:
+            return None
+
+        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
+        return self.face_detector.detect(input)
+
+    def classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None:
+        if not self.landmark_detector:
+            return None
+
         if not self.label_map:
             self.__build_classifier()
@@ -255,7 +255,7 @@ def get_amd_gpu_stats() -> dict[str, str]:
     return results


-def get_intel_gpu_stats() -> dict[str, str]:
+def get_intel_gpu_stats(sriov: bool) -> dict[str, str]:
     """Get stats using intel_gpu_top."""

     def get_stats_manually(output: str) -> dict[str, str]:
@@ -302,6 +302,9 @@ def get_intel_gpu_stats() -> dict[str, str]:
         "1",
     ]

+    if sriov:
+        intel_gpu_top_command += ["-d", "drm:/dev/dri/card0"]
+
     p = sp.run(
         intel_gpu_top_command,
         encoding="ascii",
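`get_intel_gpu_stats` now takes an `sriov` flag; when set, the `intel_gpu_top` invocation is pinned to an explicit DRM node, presumably because device auto-detection is unreliable on SR-IOV virtual-function setups. A call-site sketch (the boolean normally comes from `config.telemetry.stats.sriov`, as the stats diff above shows):

    usage = get_intel_gpu_stats(False)       # default device discovery
    usage_sriov = get_intel_gpu_stats(True)  # adds "-d drm:/dev/dri/card0"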
web/src/components/icons/AddFaceIcon.tsx (new file, 25 lines)
@@ -0,0 +1,25 @@
+import { forwardRef } from "react";
+import { LuPlus, LuScanFace } from "react-icons/lu";
+import { cn } from "@/lib/utils";
+
+type AddFaceIconProps = {
+  className?: string;
+  onClick?: () => void;
+};
+
+const AddFaceIcon = forwardRef<HTMLDivElement, AddFaceIconProps>(
+  ({ className, onClick }, ref) => {
+    return (
+      <div
+        ref={ref}
+        className={cn("relative flex items-center", className)}
+        onClick={onClick}
+      >
+        <LuScanFace className="size-full" />
+        <LuPlus className="absolute size-4 translate-x-3 translate-y-3" />
+      </div>
+    );
+  },
+);
+
+export default AddFaceIcon;
@@ -1,19 +1,41 @@
 import { baseUrl } from "@/api/baseUrl";
-import Chip from "@/components/indicators/Chip";
+import AddFaceIcon from "@/components/icons/AddFaceIcon";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
 import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog";
 import { Button } from "@/components/ui/button";
+import {
+  DropdownMenu,
+  DropdownMenuContent,
+  DropdownMenuItem,
+  DropdownMenuLabel,
+  DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";
 import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
 import { Toaster } from "@/components/ui/sonner";
 import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
+import {
+  Tooltip,
+  TooltipContent,
+  TooltipTrigger,
+} from "@/components/ui/tooltip";
 import useOptimisticState from "@/hooks/use-optimistic-state";
+import { cn } from "@/lib/utils";
+import { FrigateConfig } from "@/types/frigateConfig";
 import axios from "axios";
 import { useCallback, useEffect, useMemo, useRef, useState } from "react";
-import { isDesktop } from "react-device-detect";
-import { LuImagePlus, LuTrash } from "react-icons/lu";
+import { LuImagePlus, LuTrash2 } from "react-icons/lu";
 import { toast } from "sonner";
 import useSWR from "swr";

 export default function FaceLibrary() {
+  const { data: config } = useSWR<FrigateConfig>("config");
+
+  // title
+
+  useEffect(() => {
+    document.title = "Face Library - Frigate";
+  }, []);
+
   const [page, setPage] = useState<string>();
   const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100);
   const tabsRef = useRef<HTMLDivElement | null>(null);
@@ -23,7 +45,8 @@ export default function FaceLibrary() {
   const { data: faceData, mutate: refreshFaces } = useSWR("faces");

   const faces = useMemo<string[]>(
-    () => (faceData ? Object.keys(faceData) : []),
+    () =>
+      faceData ? Object.keys(faceData).filter((face) => face != "train") : [],
     [faceData],
   );
   const faceImages = useMemo<string[]>(
@@ -31,13 +54,24 @@ export default function FaceLibrary() {
     [pageToggle, faceData],
   );

+  const trainImages = useMemo<string[]>(
+    () => faceData?.["train"] || [],
+    [faceData],
+  );
+
   useEffect(() => {
-    if (!pageToggle && faces) {
+    if (!pageToggle) {
+      if (trainImages.length > 0) {
+        setPageToggle("train");
+      } else if (faces) {
+        setPageToggle(faces[0]);
+      }
+    } else if (pageToggle == "train" && trainImages.length == 0) {
       setPageToggle(faces[0]);
     }
     // we need to listen on the value of the faces list
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [faces]);
+  }, [trainImages, faces]);

   // upload

@@ -58,7 +92,7 @@ export default function FaceLibrary() {
       setUpload(false);
       refreshFaces();
       toast.success(
-        "Successfully uploaded iamge. View the file in the /exports folder.",
+        "Successfully uploaded image. View the file in the /exports folder.",
         { position: "top-center" },
       );
     }
@@ -79,6 +113,10 @@ export default function FaceLibrary() {
     [pageToggle, refreshFaces],
   );

+  if (!config) {
+    return <ActivityIndicator />;
+  }
+
   return (
     <div className="flex size-full flex-col p-2">
       <Toaster />
@@ -91,7 +129,7 @@ export default function FaceLibrary() {
         onSave={onUploadImage}
       />

-      <div className="relative flex h-11 w-full items-center justify-between">
+      <div className="relative mb-2 flex h-11 w-full items-center justify-between">
         <ScrollArea className="w-full whitespace-nowrap">
           <div ref={tabsRef} className="flex flex-row">
             <ToggleGroup
@@ -105,53 +143,146 @@ export default function FaceLibrary() {
                 }
               }}
             >
+              {trainImages.length > 0 && (
+                <>
+                  <ToggleGroupItem
+                    value="train"
+                    className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == "train" ? "" : "*:text-muted-foreground"}`}
+                    data-nav-item="train"
+                    aria-label="Select train"
+                  >
+                    <div>Train</div>
+                  </ToggleGroupItem>
+                  <div>|</div>
+                </>
+              )}
+
               {Object.values(faces).map((item) => (
                 <ToggleGroupItem
                   key={item}
-                  className={`flex scroll-mx-10 items-center justify-between gap-2 ${page == "UI settings" ? "last:mr-20" : ""} ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
+                  className={`flex scroll-mx-10 items-center justify-between gap-2 ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
                   value={item}
                   data-nav-item={item}
                   aria-label={`Select ${item}`}
                 >
-                  <div className="capitalize">{item}</div>
+                  <div className="capitalize">
+                    {item} ({faceData[item].length})
+                  </div>
                 </ToggleGroupItem>
               ))}
             </ToggleGroup>
             <ScrollBar orientation="horizontal" className="h-0" />
           </div>
         </ScrollArea>
+        <Button className="flex gap-2" onClick={() => setUpload(true)}>
+          <LuImagePlus className="size-7 rounded-md p-1 text-secondary-foreground" />
+          Upload Image
+        </Button>
       </div>
-      {pageToggle && (
-        <div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll">
-          {faceImages.map((image: string) => (
-            <FaceImage key={image} name={pageToggle} image={image} />
-          ))}
-          <Button
-            key="upload"
-            className="size-40"
-            onClick={() => setUpload(true)}
-          >
-            <LuImagePlus className="size-10" />
-          </Button>
-        </div>
-      )}
+      {pageToggle &&
+        (pageToggle == "train" ? (
+          <TrainingGrid
+            config={config}
+            attemptImages={trainImages}
+            faceNames={faces}
+            onRefresh={refreshFaces}
+          />
+        ) : (
+          <FaceGrid
+            faceImages={faceImages}
+            pageToggle={pageToggle}
+            onRefresh={refreshFaces}
+          />
+        ))}
     </div>
   );
 }

-type FaceImageProps = {
-  name: string;
-  image: string;
+type TrainingGridProps = {
+  config: FrigateConfig;
+  attemptImages: string[];
+  faceNames: string[];
+  onRefresh: () => void;
 };
-function FaceImage({ name, image }: FaceImageProps) {
-  const [hovered, setHovered] = useState(false);
+function TrainingGrid({
+  config,
+  attemptImages,
+  faceNames,
+  onRefresh,
+}: TrainingGridProps) {
+  return (
+    <div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll">
+      {attemptImages.map((image: string) => (
+        <FaceAttempt
+          key={image}
+          image={image}
+          faceNames={faceNames}
+          threshold={config.face_recognition.threshold}
+          onRefresh={onRefresh}
+        />
+      ))}
+    </div>
+  );
+}
+
+type FaceAttemptProps = {
+  image: string;
+  faceNames: string[];
+  threshold: number;
+  onRefresh: () => void;
+};
+function FaceAttempt({
+  image,
+  faceNames,
+  threshold,
+  onRefresh,
+}: FaceAttemptProps) {
+  const data = useMemo(() => {
+    const parts = image.split("-");
+
+    return {
+      eventId: `${parts[0]}-${parts[1]}`,
+      name: parts[2],
+      score: parts[3],
+    };
+  }, [image]);
+
+  const onTrainAttempt = useCallback(
+    (trainName: string) => {
+      axios
+        .post(`/faces/train/${trainName}/classify`, { training_file: image })
+        .then((resp) => {
+          if (resp.status == 200) {
+            toast.success(`Successfully trained face.`, {
+              position: "top-center",
+            });
+            onRefresh();
+          }
+        })
+        .catch((error) => {
+          if (error.response?.data?.message) {
+            toast.error(`Failed to train: ${error.response.data.message}`, {
+              position: "top-center",
+            });
+          } else {
+            toast.error(`Failed to train: ${error.message}`, {
+              position: "top-center",
+            });
+          }
+        });
+    },
+    [image, onRefresh],
+  );

   const onDelete = useCallback(() => {
     axios
-      .post(`/faces/${name}/delete`, { ids: [image] })
+      .post(`/faces/train/delete`, { ids: [image] })
      .then((resp) => {
        if (resp.status == 200) {
-          toast.error(`Successfully deleted face.`, { position: "top-center" });
+          toast.success(`Successfully deleted face.`, {
+            position: "top-center",
+          });
+          onRefresh();
        }
      })
      .catch((error) => {
@@ -165,29 +296,139 @@
        });
      }
    });
-  }, [name, image]);
+  }, [image, onRefresh]);

   return (
-    <div
-      className="relative h-40"
-      onMouseEnter={isDesktop ? () => setHovered(true) : undefined}
-      onMouseLeave={isDesktop ? () => setHovered(false) : undefined}
-      onClick={isDesktop ? undefined : () => setHovered(!hovered)}
-    >
-      {hovered && (
-        <div className="absolute right-1 top-1">
-          <Chip
-            className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
-            onClick={() => onDelete()}
-          >
-            <LuTrash className="size-4 fill-destructive text-destructive" />
-          </Chip>
-        </div>
-      )}
-      <img
-        className="h-40 rounded-md"
-        src={`${baseUrl}clips/faces/${name}/${image}`}
-      />
+    <div className="relative flex flex-col rounded-lg">
+      <div className="w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground">
+        <img className="h-40" src={`${baseUrl}clips/faces/train/${image}`} />
+      </div>
+      <div className="rounded-b-lg bg-card p-2">
+        <div className="flex w-full flex-row items-center justify-between gap-2">
+          <div className="flex flex-col items-start text-xs text-primary-variant">
+            <div className="capitalize">{data.name}</div>
+            <div
+              className={cn(
+                Number.parseFloat(data.score) >= threshold
+                  ? "text-success"
+                  : "text-danger",
+              )}
+            >
+              {Number.parseFloat(data.score) * 100}%
+            </div>
+          </div>
+          <div className="flex flex-row items-start justify-end gap-5 md:gap-4">
+            <Tooltip>
+              <DropdownMenu>
+                <DropdownMenuTrigger>
+                  <TooltipTrigger>
+                    <AddFaceIcon className="size-5 cursor-pointer text-primary-variant hover:text-primary" />
+                  </TooltipTrigger>
+                </DropdownMenuTrigger>
+                <DropdownMenuContent>
+                  <DropdownMenuLabel>Train Face as:</DropdownMenuLabel>
+                  {faceNames.map((faceName) => (
+                    <DropdownMenuItem
+                      key={faceName}
+                      className="cursor-pointer capitalize"
+                      onClick={() => onTrainAttempt(faceName)}
+                    >
+                      {faceName}
+                    </DropdownMenuItem>
+                  ))}
+                </DropdownMenuContent>
+              </DropdownMenu>
+              <TooltipContent>Train Face as Person</TooltipContent>
+            </Tooltip>
+            <Tooltip>
+              <TooltipTrigger>
+                <LuTrash2
+                  className="size-5 cursor-pointer text-primary-variant hover:text-primary"
+                  onClick={onDelete}
+                />
+              </TooltipTrigger>
+              <TooltipContent>Delete Face Attempt</TooltipContent>
+            </Tooltip>
+          </div>
+        </div>
+      </div>
+    </div>
+  );
+}
+
+type FaceGridProps = {
+  faceImages: string[];
+  pageToggle: string;
+  onRefresh: () => void;
+};
+function FaceGrid({ faceImages, pageToggle, onRefresh }: FaceGridProps) {
+  return (
+    <div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll">
+      {faceImages.map((image: string) => (
+        <FaceImage
+          key={image}
+          name={pageToggle}
+          image={image}
+          onRefresh={onRefresh}
+        />
+      ))}
+    </div>
+  );
+}
+
+type FaceImageProps = {
+  name: string;
+  image: string;
+  onRefresh: () => void;
+};
+function FaceImage({ name, image, onRefresh }: FaceImageProps) {
+  const onDelete = useCallback(() => {
+    axios
+      .post(`/faces/${name}/delete`, { ids: [image] })
+      .then((resp) => {
+        if (resp.status == 200) {
+          toast.success(`Successfully deleted face.`, {
+            position: "top-center",
+          });
+          onRefresh();
+        }
+      })
+      .catch((error) => {
+        if (error.response?.data?.message) {
+          toast.error(`Failed to delete: ${error.response.data.message}`, {
+            position: "top-center",
+          });
+        } else {
+          toast.error(`Failed to delete: ${error.message}`, {
+            position: "top-center",
+          });
+        }
+      });
+  }, [name, image, onRefresh]);
+
+  return (
+    <div className="relative flex flex-col rounded-lg">
+      <div className="w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground">
+        <img className="h-40" src={`${baseUrl}clips/faces/${name}/${image}`} />
+      </div>
+      <div className="rounded-b-lg bg-card p-2">
+        <div className="flex w-full flex-row items-center justify-between gap-2">
+          <div className="flex flex-col items-start text-xs text-primary-variant">
+            <div className="capitalize">{name}</div>
+          </div>
+          <div className="flex flex-row items-start justify-end gap-5 md:gap-4">
+            <Tooltip>
+              <TooltipTrigger>
+                <LuTrash2
+                  className="size-5 cursor-pointer text-primary-variant hover:text-primary"
+                  onClick={onDelete}
+                />
+              </TooltipTrigger>
+              <TooltipContent>Delete Face Attempt</TooltipContent>
+            </Tooltip>
+          </div>
+        </div>
+      </div>
     </div>
   );
 }
@@ -1,12 +1,12 @@
 import useSWR from "swr";
 import { FrigateStats } from "@/types/stats";
-import { useEffect, useState } from "react";
+import { useEffect, useMemo, useState } from "react";
 import TimeAgo from "@/components/dynamic/TimeAgo";
 import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
 import { isDesktop, isMobile } from "react-device-detect";
 import GeneralMetrics from "@/views/system/GeneralMetrics";
 import StorageMetrics from "@/views/system/StorageMetrics";
-import { LuActivity, LuHardDrive } from "react-icons/lu";
+import { LuActivity, LuHardDrive, LuSearchCode } from "react-icons/lu";
 import { FaVideo } from "react-icons/fa";
 import Logo from "@/components/Logo";
 import useOptimisticState from "@/hooks/use-optimistic-state";
@@ -16,11 +16,28 @@ import { capitalizeFirstLetter } from "@/utils/stringUtil";
 import { Toaster } from "@/components/ui/sonner";
 import { t } from "i18next";
 import { Trans } from "react-i18next";
+import { FrigateConfig } from "@/types/frigateConfig";
+import FeatureMetrics from "@/views/system/FeatureMetrics";

-const metrics = ["general", "storage", "cameras"] as const;
-type SystemMetric = (typeof metrics)[number];
+const allMetrics = ["general", "features", "storage", "cameras"] as const;
+type SystemMetric = (typeof allMetrics)[number];

 function System() {
+  const { data: config } = useSWR<FrigateConfig>("config", {
+    revalidateOnFocus: false,
+  });
+
+  const metrics = useMemo(() => {
+    const metrics = [...allMetrics];
+
+    if (!config?.semantic_search.enabled) {
+      const index = metrics.indexOf("features");
+      metrics.splice(index, 1);
+    }
+
+    return metrics;
+  }, [config]);
+
   // stats page

   const [page, setPage] = useHashState<SystemMetric>();
@@ -69,6 +86,7 @@ function System() {
               aria-label={`Select ${item}`}
             >
               {item == "general" && <LuActivity className="size-4" />}
+              {item == "features" && <LuSearchCode className="size-4" />}
               {item == "storage" && <LuHardDrive className="size-4" />}
               {item == "cameras" && <FaVideo className="size-4" />}
               {isDesktop && (
@@ -101,6 +119,12 @@ function System() {
             setLastUpdated={setLastUpdated}
           />
         )}
+        {page == "features" && (
+          <FeatureMetrics
+            lastUpdated={lastUpdated}
+            setLastUpdated={setLastUpdated}
+          />
+        )}
         {page == "storage" && <StorageMetrics setLastUpdated={setLastUpdated} />}
|
||||||
{page == "cameras" && (
|
{page == "cameras" && (
|
||||||
<CameraMetrics
|
<CameraMetrics
|
||||||
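The new useMemo above is a gating pattern: start from the full tab list and drop "features" when semantic search is disabled in the config. The same logic as a pure function, for illustration only (visibleMetrics and its boolean parameter are assumed names, not code from this diff):

const allMetrics = ["general", "features", "storage", "cameras"] as const;
type SystemMetric = (typeof allMetrics)[number];

// Return the tabs to render; "features" only appears when
// semantic search is enabled.
function visibleMetrics(semanticSearchEnabled: boolean): SystemMetric[] {
  return allMetrics.filter((m) => m !== "features" || semanticSearchEnabled);
}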
@ -290,6 +290,7 @@ export interface FrigateConfig {
 
   face_recognition: {
     enabled: boolean;
+    threshold: number;
   };
 
   ffmpeg: {
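The new threshold field extends the typed config surface, so consumers can read it straight off the fetched config. Illustrative only (the helper and the enabled guard are assumptions, not Frigate code):

import { FrigateConfig } from "@/types/frigateConfig";

// Read the face recognition cutoff from a fetched config, if enabled.
function faceThreshold(config?: FrigateConfig): number | undefined {
  return config?.face_recognition.enabled
    ? config.face_recognition.threshold
    : undefined;
}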
@ -18,6 +18,11 @@ export const InferenceThreshold = {
   error: 100,
 } as Threshold;
 
+export const EmbeddingThreshold = {
+  warning: 500,
+  error: 1000,
+} as Threshold;
+
 export const DetectorTempThreshold = {
   warning: 72,
   error: 80,
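EmbeddingThreshold follows the same Threshold shape as InferenceThreshold, a warning and an error cutoff in milliseconds, with higher limits presumably because embedding passes take longer than detector inference. A sketch of how a graph might bucket a sample against it (Severity and classify are illustrative names; only the 500/1000 values come from this diff):

type Threshold = { warning: number; error: number };

const EmbeddingThreshold: Threshold = { warning: 500, error: 1000 };

type Severity = "ok" | "warning" | "error";

// Bucket an embedding speed (ms) against the threshold cutoffs.
function classify(ms: number, t: Threshold): Severity {
  if (ms >= t.error) return "error";
  if (ms >= t.warning) return "warning";
  return "ok";
}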
@ -2,6 +2,7 @@ export interface FrigateStats {
   cameras: { [camera_name: string]: CameraStats };
   cpu_usages: { [pid: string]: CpuStats };
   detectors: { [detectorKey: string]: DetectorStats };
+  embeddings?: EmbeddingsStats;
   gpu_usages?: { [gpuKey: string]: GpuStats };
   processes: { [processKey: string]: ExtraProcessStats };
   service: ServiceStats;
@ -34,6 +35,13 @@ export type DetectorStats = {
   pid: number;
 };
 
+export type EmbeddingsStats = {
+  image_embedding_speed: number;
+  face_embedding_speed: number;
+  plate_recognition_speed: number;
+  text_embedding_speed: number;
+};
+
 export type ExtraProcessStats = {
   pid: number;
 };
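embeddings is optional on FrigateStats, presumably because the payload only carries it when the corresponding features are enabled, so consumers must null-check before reading the four speed fields. An illustrative consumer (the averaging helper is an assumption, not part of Frigate):

import { EmbeddingsStats } from "@/types/stats";

// Collapse the four reported speeds (ms) into one number,
// e.g. for a summary tile.
export function averageEmbeddingSpeed(e: EmbeddingsStats): number {
  const speeds = [
    e.image_embedding_speed,
    e.face_embedding_speed,
    e.plate_recognition_speed,
    e.text_embedding_speed,
  ];
  return speeds.reduce((sum, s) => sum + s, 0) / speeds.length;
}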
122
web/src/views/system/FeatureMetrics.tsx
Normal file
@ -0,0 +1,122 @@
import useSWR from "swr";
import { FrigateStats } from "@/types/stats";
import { useEffect, useMemo, useState } from "react";
import { useFrigateStats } from "@/api/ws";
import { EmbeddingThreshold } from "@/types/graph";
import { Skeleton } from "@/components/ui/skeleton";
import { ThresholdBarGraph } from "@/components/graph/SystemGraph";
import { cn } from "@/lib/utils";

type FeatureMetricsProps = {
  lastUpdated: number;
  setLastUpdated: (last: number) => void;
};
export default function FeatureMetrics({
  lastUpdated,
  setLastUpdated,
}: FeatureMetricsProps) {
  // stats

  const { data: initialStats } = useSWR<FrigateStats[]>(
    ["stats/history", { keys: "embeddings,service" }],
    {
      revalidateOnFocus: false,
    },
  );

  const [statsHistory, setStatsHistory] = useState<FrigateStats[]>([]);
  const updatedStats = useFrigateStats();

  useEffect(() => {
    if (initialStats == undefined || initialStats.length == 0) {
      return;
    }

    if (statsHistory.length == 0) {
      setStatsHistory(initialStats);
      return;
    }

    if (!updatedStats) {
      return;
    }

    if (updatedStats.service.last_updated > lastUpdated) {
      setStatsHistory([...statsHistory.slice(1), updatedStats]);
      setLastUpdated(Date.now() / 1000);
    }
  }, [initialStats, updatedStats, statsHistory, lastUpdated, setLastUpdated]);

  // timestamps

  const updateTimes = useMemo(
    () => statsHistory.map((stats) => stats.service.last_updated),
    [statsHistory],
  );

  // features stats

  const embeddingInferenceTimeSeries = useMemo(() => {
    if (!statsHistory) {
      return [];
    }

    const series: {
      [key: string]: { name: string; data: { x: number; y: number }[] };
    } = {};

    statsHistory.forEach((stats, statsIdx) => {
      if (!stats?.embeddings) {
        return;
      }

      Object.entries(stats.embeddings).forEach(([rawKey, stat]) => {
        const key = rawKey.replaceAll("_", " ");

        if (!(key in series)) {
          series[key] = { name: key, data: [] };
        }

        series[key].data.push({ x: statsIdx + 1, y: stat });
      });
    });
    return Object.values(series);
  }, [statsHistory]);

  return (
    <>
      <div className="scrollbar-container mt-4 flex size-full flex-col overflow-y-auto">
        <div className="text-sm font-medium text-muted-foreground">
          Features
        </div>
        <div
          className={cn(
            "mt-4 grid w-full grid-cols-1 gap-2 sm:grid-cols-3",
            embeddingInferenceTimeSeries && "sm:grid-cols-4",
          )}
        >
          {statsHistory.length != 0 ? (
            <>
              {embeddingInferenceTimeSeries.map((series) => (
                <div className="rounded-lg bg-background_alt p-2.5 md:rounded-2xl">
                  <div className="mb-5 capitalize">{series.name}</div>
                  <ThresholdBarGraph
                    key={series.name}
                    graphId={`${series.name}-inference`}
                    name={series.name}
                    unit="ms"
                    threshold={EmbeddingThreshold}
                    updateTimes={updateTimes}
                    data={[series]}
                  />
                </div>
              ))}
            </>
          ) : (
            <Skeleton className="aspect-video w-full rounded-lg md:rounded-2xl" />
          )}
        </div>
      </div>
    </>
  );
}
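The heart of FeatureMetrics is the pivot inside embeddingInferenceTimeSeries: a list of stats snapshots becomes one series per embeddings key, with the key humanized for display. The same transform as a standalone function, for illustration (buildSeries and the trimmed types are assumptions, not code from this diff):

type Point = { x: number; y: number };
type Series = { name: string; data: Point[] };

// Pivot snapshots into one series per embeddings stat; x is the 1-based
// snapshot index, y the reported speed in ms.
function buildSeries(
  history: { embeddings?: Record<string, number> }[],
): Series[] {
  const series: Record<string, Series> = {};
  history.forEach((stats, idx) => {
    if (!stats.embeddings) {
      return;
    }
    for (const [rawKey, value] of Object.entries(stats.embeddings)) {
      const key = rawKey.replaceAll("_", " "); // "face_embedding_speed" -> "face embedding speed"
      (series[key] ??= { name: key, data: [] }).data.push({ x: idx + 1, y: value });
    }
  });
  return Object.values(series);
}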