Compare commits

..

No commits in common. "541b6f7c41e937297029ad45b94fee288baf1c11" and "8db3f8fc09f0d31dde364d84f9ed66a071f85abe" have entirely different histories.

3 changed files with 6 additions and 61 deletions

View File

@@ -3,9 +3,9 @@ import os.path
import re import re
import urllib.request import urllib.request
from typing import Literal from typing import Literal
from pydantic import ConfigDict, Field
import axengine as axe import axengine as axe
from pydantic import ConfigDict
from frigate.const import MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi

View File

@@ -268,7 +268,7 @@
}, },
"skip_motion_threshold": { "skip_motion_threshold": {
"label": "Skip motion threshold", "label": "Skip motion threshold",
"description": "If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Leave unset (None) to disable this feature." "description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0."
}, },
"improve_contrast": { "improve_contrast": {
"label": "Improve contrast", "label": "Improve contrast",

View File

@@ -290,61 +290,6 @@
"label": "Detector Type", "label": "Detector Type",
"description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')." "description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')."
}, },
"axengine": {
"label": "AXEngine NPU",
"description": "AXERA AX650N/AX8850N NPU detector running compiled .axmodel files via the AXEngine runtime.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
},
"cpu": { "cpu": {
"label": "CPU", "label": "CPU",
"description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.", "description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.",
@@ -1450,7 +1395,7 @@
}, },
"skip_motion_threshold": { "skip_motion_threshold": {
"label": "Skip motion threshold", "label": "Skip motion threshold",
"description": "If set to a value between 0.0 and 1.0, and more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Leave unset (None) to disable this feature." "description": "If more than this fraction of the image changes in a single frame, the detector will return no motion boxes and immediately recalibrate. This can save CPU and reduce false positives during lightning, storms, etc., but may miss real events such as a PTZ camera autotracking an object. The tradeoff is between dropping a few megabytes of recordings versus reviewing a couple short clips. Range 0.0 to 1.0."
}, },
"improve_contrast": { "improve_contrast": {
"label": "Improve contrast", "label": "Improve contrast",
@@ -1962,8 +1907,8 @@
"description": "Trigger a full reindex of historical tracked objects into the embeddings database." "description": "Trigger a full reindex of historical tracked objects into the embeddings database."
}, },
"model": { "model": {
"label": "Semantic search model or GenAI provider name", "label": "Semantic search model",
"description": "The embeddings model to use for semantic search (for example 'jinav1'), or the name of a GenAI provider with the embeddings role." "description": "The embeddings model to use for semantic search (for example 'jinav1')."
}, },
"model_size": { "model_size": {
"label": "Model size", "label": "Model size",