From 58374e830c8973cb63ef4ebb56ec03f7522e9fdc Mon Sep 17 00:00:00 2001 From: Dennis George Date: Sat, 10 Dec 2022 16:11:12 -0600 Subject: [PATCH] update detectors docs; detect detector configs dynamic --- docs/docs/configuration/detectors.md | 95 +++++++++++++----------- docs/docs/configuration/index.md | 14 ++-- frigate/config.py | 77 +++---------------- frigate/detectors/__init__.py | 7 +- frigate/detectors/detector_config.py | 78 +++++++++++++++++++ frigate/detectors/detector_types.py | 9 +++ frigate/detectors/plugins/cpu_tfl.py | 12 ++- frigate/detectors/plugins/edgetpu_tfl.py | 12 ++- frigate/detectors/plugins/openvino.py | 10 ++- frigate/test/test_config.py | 2 +- frigate/test/test_object_detector.py | 11 ++- 11 files changed, 196 insertions(+), 131 deletions(-) create mode 100644 frigate/detectors/detector_config.py diff --git a/docs/docs/configuration/detectors.md b/docs/docs/configuration/detectors.md index 268502f3e..53315b609 100644 --- a/docs/docs/configuration/detectors.md +++ b/docs/docs/configuration/detectors.md @@ -3,11 +3,37 @@ id: detectors title: Detectors --- -By default, Frigate will use a single CPU detector. If you have a Coral, you will need to configure your detector devices in the config file. When using multiple detectors, they run in dedicated processes, but pull from a common queue of requested detections across all cameras. +Frigate provides the following builtin detector types: `cpu`, `edgetpu`, and `openvino`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. -Frigate supports `edgetpu` and `cpu` as detector types. The device value should be specified according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). 
+**Note**: There is not yet support for Nvidia GPUs to perform object detection with tensorflow. It can be used for ffmpeg decoding, but not object detection. -**Note**: There is no support for Nvidia GPUs to perform object detection with tensorflow. It can be used for ffmpeg decoding, but not object detection. +## CPU Detector (not recommended) +The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`. + +The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3`. + +A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. + +```yaml +detectors: + cpu1: + type: cpu + num_threads: 3 + model: + path: "/custom_model.tflite" + cpu2: + type: cpu + num_threads: 3 +``` + +When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. + +## Edge-TPU Detector +The EdgeTPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an EdgeTPU detector, set the `"type"` attribute to `"edgetpu"`. + +The EdgeTPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds. + +A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. 
To provide your own model, bind mount the file into the container and provide the path with `model.path`. ### Single USB Coral @@ -16,6 +42,8 @@ detectors: coral: type: edgetpu device: usb + model: + path: "/custom_model.tflite" ``` ### Multiple USB Corals @@ -64,38 +92,32 @@ detectors: device: pci ``` -### CPU Detectors (not recommended) +## OpenVINO Detector +The OpenVINO detector type runs an OpenVINO IR model on Intel CPU, GPU and VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. + +The OpenVINO device plugin is specified using the `"device"` attribute according to naming conventions in the [Device Documentation](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Working_with_devices.html). Other supported devices could be `AUTO`, `CPU`, `GPU`, `MYRIAD`, etc. If not specified, the default OpenVINO device will be selected by the `AUTO` plugin. + +OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html) + +An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector. ```yaml detectors: - cpu1: - type: cpu - num_threads: 3 - cpu2: - type: cpu - num_threads: 3 -``` - -When using CPU detectors, you can add a CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. 
- -## OpenVINO - -The OpenVINO detector allows Frigate to run an OpenVINO IR model on Intel CPU, GPU and VPU hardware. - -### OpenVINO Devices - -The OpenVINO detector supports the Intel-supplied device plugins and can specify one or more devices in the configuration. See OpenVINO's device naming conventions in the [Device Documentation](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Working_with_devices.html) for more detail. Other supported devices could be `AUTO`, `CPU`, `GPU`, `MYRIAD`, etc. - -```yaml -detectors: - ov_detector: + ov: type: openvino - device: GPU + device: AUTO + model: + path: /openvino-model/ssdlite_mobilenet_v2.xml + +model: + width: 300 + height: 300 + input_tensor: nhwc + input_pixel_format: bgr + labelmap_path: /openvino-model/coco_91cl_bkgr.txt ``` -OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the GPU device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html) - -#### Intel NCS2 VPU and Myriad X Setup +### Intel NCS2 VPU and Myriad X Setup Intel produces a neural net inference accelleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for accelleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device. @@ -123,18 +145,3 @@ device_cgroup_rules: volumes: - /dev/bus/usb:/dev/bus/usb ``` - -### OpenVINO Models - -The included model for an OpenVINO detector comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. 
Use the model configuration shown below when using the OpenVINO detector. - -```yaml -model: - path: /openvino-model/ssdlite_mobilenet_v2.xml - width: 300 - height: 300 - input_tensor: nhwc - input_pixel_format: bgr - labelmap_path: /openvino-model/coco_91cl_bkgr.txt - -``` diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index c35950da2..a0ede7448 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -74,15 +74,13 @@ mqtt: # Optional: Detectors configuration. Defaults to a single CPU detector detectors: # Required: name of the detector - coral: + detector_name: # Required: type of the detector - # Valid values are 'edgetpu' (requires device property below) `openvino` (see Detectors documentation), and 'cpu'. - type: edgetpu - # Optional: Edgetpu or OpenVino device name - device: usb - # Optional: num_threads value passed to the tflite.Interpreter (default: shown below) - # This value is only used for CPU types - num_threads: 3 + # Frigate provided types include 'cpu', 'edgetpu', and 'openvino' + # Additional detector types can also be plugged in. + # Detectors may require additional configuration. + # Refer to the Detectors configuration page for more information. 
+ type: cpu # Optional: Database configuration database: diff --git a/frigate/config.py b/frigate/config.py index b22a7bf45..927cc3c5d 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -9,7 +9,7 @@ from typing import Dict, List, Optional, Tuple, Union import matplotlib.pyplot as plt import numpy as np import yaml -from pydantic import BaseModel, Extra, Field, validator +from pydantic import BaseModel, Extra, Field, validator, parse_obj_as from pydantic.fields import PrivateAttr from frigate.const import ( @@ -32,7 +32,12 @@ from frigate.ffmpeg_presets import ( parse_preset_output_record, parse_preset_output_rtmp, ) -from frigate.detectors import DetectorTypeEnum +from frigate.detectors import ( + PixelFormatEnum, + InputTensorEnum, + ModelConfig, + DetectorConfig, +) from frigate.version import VERSION @@ -715,70 +720,6 @@ class DatabaseConfig(FrigateBaseModel): ) -class PixelFormatEnum(str, Enum): - rgb = "rgb" - bgr = "bgr" - yuv = "yuv" - - -class InputTensorEnum(str, Enum): - nchw = "nchw" - nhwc = "nhwc" - - -class ModelConfig(FrigateBaseModel): - path: Optional[str] = Field(title="Custom Object detection model path.") - labelmap_path: Optional[str] = Field(title="Label map for custom object detector.") - width: int = Field(default=320, title="Object detection model input width.") - height: int = Field(default=320, title="Object detection model input height.") - labelmap: Dict[int, str] = Field( - default_factory=dict, title="Labelmap customization." 
- ) - input_tensor: InputTensorEnum = Field( - default=InputTensorEnum.nhwc, title="Model Input Tensor Shape" - ) - input_pixel_format: PixelFormatEnum = Field( - default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format" - ) - _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr() - _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr() - - @property - def merged_labelmap(self) -> Dict[int, str]: - return self._merged_labelmap - - @property - def colormap(self) -> Dict[int, Tuple[int, int, int]]: - return self._colormap - - def __init__(self, **config): - super().__init__(**config) - - self._merged_labelmap = { - **load_labels(config.get("labelmap_path", "/labelmap.txt")), - **config.get("labelmap", {}), - } - - cmap = plt.cm.get_cmap("tab10", len(self._merged_labelmap.keys())) - - self._colormap = {} - for key, val in self._merged_labelmap.items(): - self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3]) - - -class DetectorConfig(BaseModel): - type: str = Field(default=DetectorTypeEnum.cpu, title="Detector Type") - device: Optional[str] = Field(default="usb", title="Device Type") - num_threads: Optional[int] = Field(default=3, title="Number of detection threads") - model: ModelConfig = Field( - default=None, title="Detector specific model configuration." - ) - - class Config: - extra = Extra.allow - arbitrary_types_allowed = True - - class LogLevelEnum(str, Enum): debug = "debug" info = "info" @@ -893,7 +834,7 @@ class FrigateConfig(FrigateBaseModel): default_factory=ModelConfig, title="Detection model configuration." 
) detectors: Dict[str, DetectorConfig] = Field( - default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()}, + default=parse_obj_as(Dict[str, DetectorConfig], DEFAULT_DETECTORS), title="Detector hardware configuration.", ) logger: LoggerConfig = Field( @@ -1037,7 +978,7 @@ class FrigateConfig(FrigateBaseModel): config.cameras[name] = camera_config for key, detector in config.detectors.items(): - detector_config: DetectorConfig = DetectorConfig.parse_obj(detector) + detector_config: DetectorConfig = parse_obj_as(DetectorConfig, detector) if detector_config.model is None: detector_config.model = config.model else: diff --git a/frigate/detectors/__init__.py b/frigate/detectors/__init__.py index 47dfa39bc..7cbd82f08 100644 --- a/frigate/detectors/__init__.py +++ b/frigate/detectors/__init__.py @@ -1,7 +1,12 @@ import logging from .detection_api import DetectionApi -from .detector_types import DetectorTypeEnum, api_types +from .detector_config import ( + PixelFormatEnum, + InputTensorEnum, + ModelConfig, +) +from .detector_types import DetectorTypeEnum, api_types, DetectorConfig logger = logging.getLogger(__name__) diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py new file mode 100644 index 000000000..7eb8701f3 --- /dev/null +++ b/frigate/detectors/detector_config.py @@ -0,0 +1,78 @@ +import logging +from enum import Enum +from typing import Dict, List, Optional, Tuple, Union, Literal + +import matplotlib.pyplot as plt +from pydantic import BaseModel, Extra, Field, validator +from pydantic.fields import PrivateAttr + +from frigate.util import load_labels + + +logger = logging.getLogger(__name__) + + +class PixelFormatEnum(str, Enum): + rgb = "rgb" + bgr = "bgr" + yuv = "yuv" + + +class InputTensorEnum(str, Enum): + nchw = "nchw" + nhwc = "nhwc" + + +class ModelConfig(BaseModel): + path: Optional[str] = Field(title="Custom Object detection model path.") + labelmap_path: Optional[str] = Field(title="Label map for 
custom object detector.") + width: int = Field(default=320, title="Object detection model input width.") + height: int = Field(default=320, title="Object detection model input height.") + labelmap: Dict[int, str] = Field( + default_factory=dict, title="Labelmap customization." + ) + input_tensor: InputTensorEnum = Field( + default=InputTensorEnum.nhwc, title="Model Input Tensor Shape" + ) + input_pixel_format: PixelFormatEnum = Field( + default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format" + ) + _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr() + _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr() + + @property + def merged_labelmap(self) -> Dict[int, str]: + return self._merged_labelmap + + @property + def colormap(self) -> Dict[int, Tuple[int, int, int]]: + return self._colormap + + def __init__(self, **config): + super().__init__(**config) + + self._merged_labelmap = { + **load_labels(config.get("labelmap_path", "/labelmap.txt")), + **config.get("labelmap", {}), + } + + cmap = plt.cm.get_cmap("tab10", len(self._merged_labelmap.keys())) + + self._colormap = {} + for key, val in self._merged_labelmap.items(): + self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3]) + + class Config: + extra = Extra.forbid + + +class BaseDetectorConfig(BaseModel): + # the type field must be defined in all subclasses + type: str = Field(default="cpu", title="Detector Type") + model: ModelConfig = Field( + default=None, title="Detector specific model configuration." + ) + + class Config: + extra = Extra.allow + arbitrary_types_allowed = True diff --git a/frigate/detectors/detector_types.py b/frigate/detectors/detector_types.py index 48505c218..1e2269c94 100644 --- a/frigate/detectors/detector_types.py +++ b/frigate/detectors/detector_types.py @@ -1,10 +1,14 @@ import logging import importlib import pkgutil +from typing import Union +from typing_extensions import Annotated from enum import Enum +from pydantic import Field from . 
import plugins from .detection_api import DetectionApi +from .detector_config import BaseDetectorConfig logger = logging.getLogger(__name__) @@ -24,3 +28,8 @@ class StrEnum(str, Enum): DetectorTypeEnum = StrEnum("DetectorTypeEnum", {k: k for k in api_types}) + +DetectorConfig = Annotated[ + Union[tuple(BaseDetectorConfig.__subclasses__())], + Field(discriminator="type"), +] diff --git a/frigate/detectors/plugins/cpu_tfl.py b/frigate/detectors/plugins/cpu_tfl.py index 1557497e2..28235639a 100644 --- a/frigate/detectors/plugins/cpu_tfl.py +++ b/frigate/detectors/plugins/cpu_tfl.py @@ -2,19 +2,27 @@ import logging import numpy as np from frigate.detectors.detection_api import DetectionApi +from frigate.detectors.detector_config import BaseDetectorConfig +from typing import Literal +from pydantic import Extra, Field import tflite_runtime.interpreter as tflite logger = logging.getLogger(__name__) +class CpuDetectorConfig(BaseDetectorConfig): + type: Literal["cpu"] + num_threads: int = Field(default=3, title="Number of detection threads") + + class CpuTfl(DetectionApi): type_key = "cpu" - def __init__(self, detector_config): + def __init__(self, detector_config: CpuDetectorConfig): self.interpreter = tflite.Interpreter( model_path=detector_config.model.path or "/cpu_model.tflite", - num_threads=detector_config.num_threads, + num_threads=detector_config.num_threads or 3, ) self.interpreter.allocate_tensors() diff --git a/frigate/detectors/plugins/edgetpu_tfl.py b/frigate/detectors/plugins/edgetpu_tfl.py index a08fa9e28..d8ba0b2e1 100644 --- a/frigate/detectors/plugins/edgetpu_tfl.py +++ b/frigate/detectors/plugins/edgetpu_tfl.py @@ -2,6 +2,9 @@ import logging import numpy as np from frigate.detectors.detection_api import DetectionApi +from frigate.detectors.detector_config import BaseDetectorConfig +from typing import Literal +from pydantic import Extra, Field import tflite_runtime.interpreter as tflite from tflite_runtime.interpreter import load_delegate @@ -9,12 +12,17 
@@ from tflite_runtime.interpreter import load_delegate logger = logging.getLogger(__name__) +class EdgeTpuDetectorConfig(BaseDetectorConfig): + type: Literal["edgetpu"] + device: str = Field(default="usb", title="Device Type") + + class EdgeTpuTfl(DetectionApi): type_key = "edgetpu" - def __init__(self, detector_config): + def __init__(self, detector_config: EdgeTpuDetectorConfig): device_config = {"device": "usb"} - if not detector_config.device is None: + if detector_config.device is not None: device_config = {"device": detector_config.device} edge_tpu_delegate = None diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py index 200fffcf3..91195f7cc 100644 --- a/frigate/detectors/plugins/openvino.py +++ b/frigate/detectors/plugins/openvino.py @@ -3,15 +3,23 @@ import numpy as np import openvino.runtime as ov from frigate.detectors.detection_api import DetectionApi +from frigate.detectors.detector_config import BaseDetectorConfig +from typing import Literal +from pydantic import Extra, Field logger = logging.getLogger(__name__) +class OvDetectorConfig(BaseDetectorConfig): + type: Literal["openvino"] + device: str = Field(default="AUTO", title="Device Type") + + class OvDetector(DetectionApi): type_key = "openvino" - def __init__(self, detector_config): + def __init__(self, detector_config: OvDetectorConfig): self.ov_core = ov.Core() self.ov_model = self.ov_core.read_model(detector_config.model.path) diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 61dd73847..4d88f9af3 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -52,7 +52,6 @@ class TestConfig(unittest.TestCase): }, "openvino": { "type": "openvino", - "device": "usb", }, }, "model": {"path": "/default.tflite", "width": 512}, @@ -71,6 +70,7 @@ class TestConfig(unittest.TestCase): assert runtime_config.detectors["cpu"].num_threads == 3 assert runtime_config.detectors["edgetpu"].device == "usb" + assert 
runtime_config.detectors["openvino"].device == "AUTO" assert runtime_config.model.path == "/default.tflite" assert runtime_config.detectors["cpu"].model.path == "/cpu_model.tflite" diff --git a/frigate/test/test_object_detector.py b/frigate/test/test_object_detector.py index 860f90518..9cdeeb6c7 100644 --- a/frigate/test/test_object_detector.py +++ b/frigate/test/test_object_detector.py @@ -2,6 +2,7 @@ import unittest from unittest.mock import Mock, patch import numpy as np +from pydantic import parse_obj_as from frigate.config import DetectorConfig, InputTensorEnum, ModelConfig from frigate.detectors import DetectorTypeEnum @@ -17,7 +18,9 @@ class TestLocalObjectDetector(unittest.TestCase): "frigate.detectors.api_types", {det_type: Mock() for det_type in DetectorTypeEnum}, ): - test_cfg = DetectorConfig.parse_obj({"type": det_type, "model": {}}) + test_cfg = parse_obj_as( + DetectorConfig, ({"type": det_type, "model": {}}) + ) test_cfg.model.path = "/test/modelpath" test_obj = frigate.object_detection.LocalObjectDetector( detector_config=test_cfg @@ -40,7 +43,7 @@ class TestLocalObjectDetector(unittest.TestCase): TEST_DATA = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] TEST_DETECT_RESULT = np.ndarray([1, 2, 4, 8, 16, 32]) test_obj_detect = frigate.object_detection.LocalObjectDetector( - detector_config=DetectorConfig(type="cpu", model=ModelConfig()) + detector_config=parse_obj_as(DetectorConfig, {"type": "cpu", "model": {}}) ) mock_det_api = mock_cputfl.return_value @@ -63,7 +66,7 @@ class TestLocalObjectDetector(unittest.TestCase): TEST_DATA = np.zeros((1, 32, 32, 3), np.uint8) TEST_DETECT_RESULT = np.ndarray([1, 2, 4, 8, 16, 32]) - test_cfg = DetectorConfig(type="cpu", model=ModelConfig()) + test_cfg = parse_obj_as(DetectorConfig, {"type": "cpu", "model": {}}) test_cfg.model.input_tensor = InputTensorEnum.nchw test_obj_detect = frigate.object_detection.LocalObjectDetector( @@ -112,7 +115,7 @@ class TestLocalObjectDetector(unittest.TestCase): "label-5", ] - test_cfg = 
DetectorConfig(type="cpu", model=ModelConfig()) + test_cfg = parse_obj_as(DetectorConfig, {"type": "cpu", "model": {}}) test_cfg.model = ModelConfig() test_obj_detect = frigate.object_detection.LocalObjectDetector( detector_config=test_cfg,