diff --git a/frigate/detectors/plugins/axengine.py b/frigate/detectors/plugins/axengine.py index 681a9b996..20d6a42a6 100644 --- a/frigate/detectors/plugins/axengine.py +++ b/frigate/detectors/plugins/axengine.py @@ -29,7 +29,7 @@ class AxengineDetectorConfig(BaseDetectorConfig): model_config = ConfigDict( title="AXEngine NPU", ) - + type: Literal[DETECTOR_KEY] diff --git a/web/public/locales/en/config/cameras.json b/web/public/locales/en/config/cameras.json index 5880d30c3..0ae231c37 100644 --- a/web/public/locales/en/config/cameras.json +++ b/web/public/locales/en/config/cameras.json @@ -268,7 +268,7 @@ }, "skip_motion_threshold": { "label": "Skip motion threshold", - "description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto‑tracking an object. The trade‑off is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0." + "description": "If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto‑tracking an object. The trade‑off is between dropping a few megabytes of recordings versus reviewing a couple of short clips. Leave unset (None) to disable this feature." 
}, "improve_contrast": { "label": "Improve contrast", diff --git a/web/public/locales/en/config/global.json b/web/public/locales/en/config/global.json index 5268c1b02..fdfc4b389 100644 --- a/web/public/locales/en/config/global.json +++ b/web/public/locales/en/config/global.json @@ -290,6 +290,61 @@ "label": "Detector Type", "description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')." }, + "axengine": { + "label": "AXEngine NPU", + "description": "AXERA AX650N/AX8850N NPU detector running compiled .axmodel files via the AXEngine runtime.", + "type": { + "label": "Type" + }, + "model": { + "label": "Detector specific model configuration", + "description": "Detector-specific model configuration options (path, input size, etc.).", + "path": { + "label": "Custom Object detection model path", + "description": "Path to a custom detection model file (or plus:// for Frigate+ models)." + }, + "labelmap_path": { + "label": "Label map for custom object detector", + "description": "Path to a labelmap file that maps numeric classes to string labels for the detector." + }, + "width": { + "label": "Object detection model input width", + "description": "Width of the model input tensor in pixels." + }, + "height": { + "label": "Object detection model input height", + "description": "Height of the model input tensor in pixels." + }, + "labelmap": { + "label": "Labelmap customization", + "description": "Overrides or remapping entries to merge into the standard labelmap." + }, + "attributes_map": { + "label": "Map of object labels to their attribute labels", + "description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])." + }, + "input_tensor": { + "label": "Model Input Tensor Shape", + "description": "Tensor format expected by the model: 'nhwc' or 'nchw'." 
+ }, + "input_pixel_format": { + "label": "Model Input Pixel Color Format", + "description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'." + }, + "input_dtype": { + "label": "Model Input D Type", + "description": "Data type of the model input tensor (for example 'float32')." + }, + "model_type": { + "label": "Object Detection Model Type", + "description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization." + } + }, + "model_path": { + "label": "Detector specific model path", + "description": "File path to the detector model binary if required by the chosen detector." + } + }, "cpu": { "label": "CPU", "description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.", @@ -1395,7 +1450,7 @@ }, "skip_motion_threshold": { "label": "Skip motion threshold", - "description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto‑tracking an object. The trade‑off is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0." + "description": "If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera auto‑tracking an object. The trade‑off is between dropping a few megabytes of recordings versus reviewing a couple of short clips. Leave unset (None) to disable this feature." }, "improve_contrast": { "label": "Improve contrast", @@ -1907,8 +1962,8 @@ "description": "Trigger a full reindex of historical tracked objects into the embeddings database." 
}, "model": { - "label": "Semantic search model", - "description": "The embeddings model to use for semantic search (for example 'jinav1')." + "label": "Semantic search model or GenAI provider name", + "description": "The embeddings model to use for semantic search (for example 'jinav1'), or the name of a GenAI provider with the embeddings role." }, "model_size": { "label": "Model size",