From 0d562ffc5ee9749746d754b3a373f4158f52ee98 Mon Sep 17 00:00:00 2001 From: GaryHuang-ASUS Date: Tue, 23 Sep 2025 11:25:00 +0800 Subject: [PATCH] [Update] Update document content and detector default layout - Update object_detectors document - Update detector's default layout - Update default model name --- docker/synaptics/Dockerfile | 2 +- docs/docs/configuration/object_detectors.md | 27 ++++++++++++--------- frigate/detectors/plugins/synaptics.py | 7 +++++- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/docker/synaptics/Dockerfile b/docker/synaptics/Dockerfile index ba32471a8..b7d2b8168 100644 --- a/docker/synaptics/Dockerfile +++ b/docker/synaptics/Dockerfile @@ -25,4 +25,4 @@ COPY --from=rootfs / / COPY --from=synap1680-wheels /rootfs/usr/local/lib/*.so /usr/lib -ADD https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap /model.synap +ADD https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap /synaptics/mobilenet.synap diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 71ad5e844..4f1e2a21b 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -850,27 +850,32 @@ Hardware accelerated object detection is supported on the following SoCs: This implementation uses the [Synaptics model conversion](https://synaptics-synap.github.io/doc/v/latest/docs/manual/introduction.html#offline-model-conversion), version v3.1.0. +This implementation is based on sdk `v1.5.0`. + See the [installation docs](../frigate/installation.md#synaptics) for information on configuring the SL-series NPU hardware. ### Configuration When configuring the Synap detector, you have to specify the model: a local **path**. -#### SSD +#### SSD Mobilenet -Use this configuration for ssd models. 
Here's a default pre-converted ssd model under the root folder. +A synap model is provided in the container at /synaptics/mobilenet.synap and is used by this detector type by default. The model comes from [synap-release GitHub](https://github.com/synaptics-astra/synap-release/tree/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80). + +Use the model configuration shown below when using the synaptics detector with the default synap model: ```yaml -detectors: - synap_npu: - type: synaptics +detectors: # required + synap_npu: # required + type: synaptics # required -model: - path: /model.synap - width: 224 - height: 224 - tensor_format: nhwc - labelmap_path: /labelmap/coco-80.txt +model: # required + path: /synaptics/mobilenet.synap # required + width: 224 # required + height: 224 # required + # Currently, the tensor format is statically specified in the detector. + tensor_format: nhwc # optional + labelmap_path: /labelmap/coco-80.txt # required ``` ## Rockchip platform diff --git a/frigate/detectors/plugins/synaptics.py b/frigate/detectors/plugins/synaptics.py index 45d6b3a1b..8097b34f4 100644 --- a/frigate/detectors/plugins/synaptics.py +++ b/frigate/detectors/plugins/synaptics.py @@ -43,6 +43,7 @@ class SynapDetector(DetectionApi): self.model_type = detector_config.model.model_type self.network = synap_network self.network_input_details = self.network.inputs[0] + self.input_tensor_layout = detector_config.model.input_tensor # Create Inference Engine self.preprocessor = Preprocessor() @@ -50,7 +51,11 @@ class SynapDetector(DetectionApi): def detect_raw(self, tensor_input: np.ndarray): # It has only been testing for pre-converted mobilenet80 .tflite -> .synap model currently - postprocess_data = self.preprocessor.assign(self.network.inputs, tensor_input, Shape(tensor_input.shape), Layout.nhwc) + layout = Layout.nhwc # default layout + if self.input_tensor_layout == InputTensorEnum.nhwc: + layout = Layout.nhwc + + postprocess_data = 
self.preprocessor.assign(self.network.inputs, tensor_input, Shape(tensor_input.shape), layout) output_tensor_obj = self.network.predict() output = self.detector.process(output_tensor_obj, postprocess_data)