clean up detectors section and i18n

This commit is contained in:
Josh Hawkins 2026-02-06 07:56:30 -06:00
parent d55b6226d0
commit 19acfe0f7c
10 changed files with 1740 additions and 133 deletions

View File

@ -190,15 +190,15 @@ def generate_section_translation(config_class: type) -> Dict[str, Any]:
def get_detector_translations(
config_schema: Dict[str, Any],
) -> tuple[Dict[str, Any], Dict[str, Any]]:
"""Build detector field and type translations based on schema definitions."""
) -> tuple[Dict[str, Any], set[str]]:
"""Build detector type translations with nested fields based on schema definitions."""
defs = config_schema.get("$defs", {})
detector_schema = defs.get("DetectorConfig", {})
discriminator = detector_schema.get("discriminator", {})
mapping = discriminator.get("mapping", {})
type_translations: Dict[str, Any] = {}
field_translations: Dict[str, Any] = {}
nested_field_keys: set[str] = set()
for detector_type, ref in mapping.items():
if not isinstance(ref, str):
continue
@ -219,16 +219,18 @@ def get_detector_translations(
if description:
type_entry["description"] = description
if type_entry:
type_translations[detector_type] = type_entry
nested = extract_translations_from_schema(ref_schema, defs=defs)
nested_without_root = {
k: v for k, v in nested.items() if k not in ("label", "description")
}
field_translations.update(nested_without_root)
if nested_without_root:
type_entry.update(nested_without_root)
nested_field_keys.update(nested_without_root.keys())
return field_translations, type_translations
if type_entry:
type_translations[detector_type] = type_entry
return type_translations, nested_field_keys
def main():
@ -301,9 +303,14 @@ def main():
section_data.update(nested_without_root)
if field_name == "detectors":
detector_fields, detector_types = get_detector_translations(config_schema)
section_data.update(detector_fields)
detector_types, detector_field_keys = get_detector_translations(
config_schema
)
section_data.update(detector_types)
for key in detector_field_keys:
if key == "type":
continue
section_data.pop(key, None)
if not section_data:
logger.warning(f"No translations found for section: {field_name}")

View File

@ -287,155 +287,791 @@
"label": "Detector hardware",
"description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"num_threads": {
"label": "Number of detection threads",
"description": "The number of threads used for CPU-based inference."
},
"api_url": {
"label": "DeepStack API URL",
"description": "The URL of the DeepStack API."
},
"api_timeout": {
"label": "DeepStack API timeout (in seconds)",
"description": "Maximum time allowed for a DeepStack API request."
},
"api_key": {
"label": "DeepStack API key (if required)",
"description": "Optional API key for authenticated DeepStack services."
},
"location": {
"label": "Inference Location",
"description": "Location of the DeGirum inference engine (e.g. '@cloud', '127.0.0.1')."
},
"zoo": {
"label": "Model Zoo",
"description": "Path or URL to the DeGirum model zoo."
},
"token": {
"label": "DeGirum Cloud Token",
"description": "Token for DeGirum Cloud access."
},
"device": {
"label": "GPU Device Index",
"description": "The GPU device index to use."
},
"num_cores": {
"label": "Number of NPU cores to use",
"description": "The number of NPU cores to use (0 for auto)."
},
"endpoint": {
"label": "ZMQ IPC endpoint",
"description": "The ZMQ endpoint to connect to."
},
"request_timeout_ms": {
"label": "ZMQ request timeout in milliseconds",
"description": "Timeout for ZMQ requests in milliseconds."
},
"linger_ms": {
"label": "ZMQ socket linger in milliseconds",
"description": "Socket linger period in milliseconds."
"label": "Detector Type",
"description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')."
},
"cpu": {
"label": "CPU",
"description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended."
"description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"num_threads": {
"label": "Number of detection threads",
"description": "The number of threads used for CPU-based inference."
}
},
"deepstack": {
"label": "DeepStack",
"description": "DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended."
"description": "DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"api_url": {
"label": "DeepStack API URL",
"description": "The URL of the DeepStack API."
},
"api_timeout": {
"label": "DeepStack API timeout (in seconds)",
"description": "Maximum time allowed for a DeepStack API request."
},
"api_key": {
"label": "DeepStack API key (if required)",
"description": "Optional API key for authenticated DeepStack services."
}
},
"degirum": {
"label": "DeGirum",
"description": "DeGirum detector for running models via DeGirum cloud or local inference services."
"description": "DeGirum detector for running models via DeGirum cloud or local inference services.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"location": {
"label": "Inference Location",
"description": "Location of the DeGirum inference engine (e.g. '@cloud', '127.0.0.1')."
},
"zoo": {
"label": "Model Zoo",
"description": "Path or URL to the DeGirum model zoo."
},
"token": {
"label": "DeGirum Cloud Token",
"description": "Token for DeGirum Cloud access."
}
},
"edgetpu": {
"label": "EdgeTPU",
"description": "EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate."
"description": "EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": {
"label": "Device Type",
"description": "The device to use for EdgeTPU inference (e.g. 'usb', 'pci')."
}
},
"hailo8l": {
"label": "Hailo-8/Hailo-8L",
"description": "Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware."
"description": "Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": {
"label": "Device Type",
"description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
}
},
"memryx": {
"label": "MemryX",
"description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators."
"description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": {
"label": "Device Path",
"description": "The device to use for MemryX inference (e.g. 'PCIe')."
}
},
"onnx": {
"label": "ONNX",
"description": "ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available."
"description": "ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": {
"label": "Device Type",
"description": "The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU')."
}
},
"openvino": {
"label": "OpenVINO",
"description": "OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware."
"description": "OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": {
"label": "Device Type",
"description": "The device to use for OpenVINO inference (e.g. 'CPU', 'GPU', 'NPU')."
}
},
"rknn": {
"label": "RKNN",
"description": "RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware."
"description": "RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"num_cores": {
"label": "Number of NPU cores to use",
"description": "The number of NPU cores to use (0 for auto)."
}
},
"synaptics": {
"label": "Synaptics",
"description": "Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware."
"description": "Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
},
"teflon_tfl": {
"label": "Teflon",
"description": "Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs."
"description": "Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
},
"tensorrt": {
"label": "TensorRT",
"description": "TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference."
"description": "TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": {
"label": "GPU Device Index",
"description": "The GPU device index to use."
}
},
"zmq": {
"label": "ZMQ IPC",
"description": "ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint."
"description": "ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"endpoint": {
"label": "ZMQ IPC endpoint",
"description": "The ZMQ endpoint to connect to."
},
"request_timeout_ms": {
"label": "ZMQ request timeout in milliseconds",
"description": "Timeout for ZMQ requests in milliseconds."
},
"linger_ms": {
"label": "ZMQ socket linger in milliseconds",
"description": "Socket linger period in milliseconds."
}
}
},
"model": {

View File

@ -1234,6 +1234,14 @@
"detect": {
"title": "Detection Settings"
},
"detectors": {
"title": "Detector Settings",
"singleType": "Only one {{type}} detector is allowed.",
"keyRequired": "Detector name is required.",
"keyDuplicate": "Detector name already exists.",
"noSchema": "No detector schemas are available.",
"none": "No detector instances configured."
},
"record": {
"title": "Recording Settings"
},

View File

@ -1,17 +1,28 @@
import type { SectionConfigOverrides } from "./types";
const detectorHiddenFields = [
"*.model.labelmap",
"*.model.attributes_map",
"*.model",
"*.model_path",
];
// Section overrides for the detectors config editor. The section is rendered
// by the custom DetectorHardwareField (selected via `ui:field`), which also
// receives the hidden-field list through `ui:options` so per-instance forms
// can hide the same paths.
const detectors: SectionConfigOverrides = {
  base: {
    sectionDocs: "/configuration/object_detectors",
    restartRequired: [],
    fieldOrder: [],
    advancedFields: [],
    // Fix: the object literal previously declared `hiddenFields` twice (an
    // inline array followed by `detectorHiddenFields`); the first entry was
    // dead code. Keep only the shared constant.
    hiddenFields: detectorHiddenFields,
    uiSchema: {
      "ui:field": "DetectorHardwareField",
      "ui:options": {
        // Types that may be instantiated more than once.
        multiInstanceTypes: ["cpu", "onnx", "openvino"],
        // Preferred ordering in the type select; unlisted types follow.
        typeOrder: ["onnx", "openvino", "edgetpu"],
        hiddenByType: {},
        hiddenFields: detectorHiddenFields,
      },
    },
  },
};

View File

@ -0,0 +1,869 @@
import type {
ErrorSchema,
FieldPathList,
FieldProps,
RJSFSchema,
UiSchema,
} from "@rjsf/utils";
import { toFieldPathId } from "@rjsf/utils";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useTranslation } from "react-i18next";
import {
LuChevronDown,
LuChevronRight,
LuPlus,
LuTrash2,
} from "react-icons/lu";
import { applySchemaDefaults } from "@/lib/config-schema";
import { cn, isJsonObject, mergeUiSchema } from "@/lib/utils";
import { ConfigFormContext, JsonObject } from "@/types/configForm";
import { Button } from "@/components/ui/button";
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "@/components/ui/collapsible";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { humanizeKey } from "../utils/i18n";
// Options accepted through `ui:options` on the detectors field.
type DetectorHardwareFieldOptions = {
  // Detector types allowed to have multiple instances; all others are
  // restricted to a single instance.
  multiInstanceTypes?: string[];
  // Extra field paths to hide, keyed by detector type.
  hiddenByType?: Record<string, string[]>;
  // Field paths hidden for every detector type ("*." prefix allowed).
  hiddenFields?: string[];
  // Preferred ordering of types in the type selects.
  typeOrder?: string[];
};
// One variant of the DetectorConfig union: the `type` discriminator value
// paired with that variant's schema.
type DetectorSchemaEntry = {
  type: string;
  schema: RJSFSchema;
};
// Fallback when no multiInstanceTypes option is provided.
const DEFAULT_MULTI_INSTANCE_TYPES = ["cpu", "onnx", "openvino"];
// Module-level empty defaults: stable identities so the memo hooks that
// depend on these values do not recompute on every render.
const EMPTY_HIDDEN_BY_TYPE: Record<string, string[]> = {};
const EMPTY_HIDDEN_FIELDS: string[] = [];
const EMPTY_TYPE_ORDER: string[] = [];
// Type guard: a schema candidate is any non-null object (arrays included,
// matching `typeof` semantics).
const isSchemaObject = (schema: unknown): schema is RJSFSchema => {
  if (schema === null) {
    return false;
  }
  return typeof schema === "object";
};
// Expand a schema into its union members. `oneOf` wins over `anyOf`; a
// non-union schema is returned as a single-element list, and a missing
// schema yields an empty list. Non-object union entries are dropped.
const getUnionSchemas = (schema?: RJSFSchema): RJSFSchema[] => {
  if (!schema) {
    return [];
  }
  const { oneOf, anyOf } = schema as Record<string, unknown>;
  const union = oneOf ?? anyOf;
  if (!Array.isArray(union)) {
    return [schema];
  }
  return union.filter(
    (entry): entry is RJSFSchema => typeof entry === "object" && entry !== null,
  );
};
// Collect the discriminator values declared by a variant schema's `type`
// property: an optional `const` string first, then any string `enum` members.
const getTypeValues = (schema: RJSFSchema): string[] => {
  const props = (schema as Record<string, unknown>).properties as
    | Record<string, unknown>
    | undefined;
  const typeSchema = props?.type as Record<string, unknown> | undefined;
  const collected: string[] = [];
  const constValue = typeSchema?.const;
  if (typeof constValue === "string") {
    collected.push(constValue);
  }
  const rawEnum = typeSchema?.enum;
  const enumValues = Array.isArray(rawEnum) ? rawEnum : [];
  for (const candidate of enumValues) {
    if (typeof candidate === "string") {
      collected.push(candidate);
    }
  }
  return collected;
};
// Turn dotted field paths into a nested UiSchema tree whose leaves carry
// `"ui:widget": "hidden"`. Paths sharing a prefix merge into the same
// branch; empty paths and empty segments are ignored.
const buildHiddenUiSchema = (paths: string[]): UiSchema => {
  const root: UiSchema = {};
  for (const path of paths) {
    const segments = path.split(".").filter(Boolean);
    if (segments.length === 0) {
      continue;
    }
    // Walk (creating as needed) down to the parent of the leaf segment.
    let node = root;
    for (const segment of segments.slice(0, -1)) {
      const child = (node[segment] as UiSchema | undefined) ?? {};
      node[segment] = child;
      node = child;
    }
    const leaf = segments[segments.length - 1];
    node[leaf] = {
      ...(node[leaf] as UiSchema | undefined),
      "ui:widget": "hidden",
    } as UiSchema;
  }
  return root;
};
const getInstanceType = (value: unknown): string | undefined => {
if (!isJsonObject(value)) {
return undefined;
}
const typeValue = value.type;
return typeof typeValue === "string" && typeValue.length > 0
? typeValue
: undefined;
};
/**
 * Custom RJSF field rendering the detectors map as a list of named detector
 * instances. Each instance card offers:
 *  - a rename input for its key (the internal ID),
 *  - a type select built from the DetectorConfig discriminated union, and
 *  - a nested SchemaField form for the selected type's variant schema.
 * Adding, renaming, and changing types is validated against the
 * one-instance-per-type rule for types not listed in `multiInstanceTypes`.
 */
export function DetectorHardwareField(props: FieldProps) {
  const {
    schema,
    uiSchema,
    registry,
    fieldPathId,
    formData: rawFormData,
    errorSchema,
    disabled,
    readonly,
    hideError,
    onBlur,
    onFocus,
    onChange,
  } = props;
  // Prefer the translator and namespace supplied by the surrounding config
  // form; fall back to a locally created one.
  const formContext = registry.formContext as ConfigFormContext | undefined;
  const configNamespace =
    formContext?.i18nNamespace ??
    (formContext?.level === "camera" ? "config/cameras" : "config/global");
  const { t: fallbackT } = useTranslation(["common", configNamespace]);
  const t = formContext?.t ?? fallbackT;
  const sectionPrefix = formContext?.sectionI18nPrefix ?? "detectors";
  // ui:options carry the per-section configuration (see the detectors
  // section overrides). Module-level empty defaults keep memo deps stable.
  const options =
    (uiSchema?.["ui:options"] as DetectorHardwareFieldOptions | undefined) ??
    {};
  const multiInstanceTypes =
    options.multiInstanceTypes ?? DEFAULT_MULTI_INSTANCE_TYPES;
  const hiddenByType = options.hiddenByType ?? EMPTY_HIDDEN_BY_TYPE;
  const hiddenFields = options.hiddenFields ?? EMPTY_HIDDEN_FIELDS;
  const typeOrder = options.typeOrder ?? EMPTY_TYPE_ORDER;
  const multiInstanceSet = useMemo(
    () => new Set(multiInstanceTypes),
    [multiInstanceTypes],
  );
  // Strip the "*." any-detector prefix; empty paths are dropped.
  const globalHiddenFields = useMemo(
    () =>
      hiddenFields
        .map((path) => (path.startsWith("*.") ? path.slice(2) : path))
        .filter((path) => path.length > 0),
    [hiddenFields],
  );
  // The per-instance schema: taken from additionalProperties when present,
  // otherwise from the DetectorConfig definition in the root schema.
  const detectorConfigSchema = useMemo(() => {
    const additional = (schema as RJSFSchema | undefined)?.additionalProperties;
    if (isSchemaObject(additional)) {
      return additional as RJSFSchema;
    }
    const rootSchema = registry.rootSchema as Record<string, unknown>;
    const defs =
      (rootSchema?.$defs as Record<string, unknown> | undefined) ??
      (rootSchema?.definitions as Record<string, unknown> | undefined);
    const fallback = defs?.DetectorConfig;
    return isSchemaObject(fallback) ? (fallback as RJSFSchema) : undefined;
  }, [schema, registry.rootSchema]);
  // Flatten the union into (type, variant schema) entries. A variant that
  // declares several type values is listed once per value.
  const detectorSchemas = useMemo<DetectorSchemaEntry[]>(() => {
    const entries: DetectorSchemaEntry[] = [];
    getUnionSchemas(detectorConfigSchema).forEach((schema) => {
      const types = getTypeValues(schema);
      types.forEach((type) => {
        entries.push({ type, schema });
      });
    });
    return entries;
  }, [detectorConfigSchema]);
  // First schema wins when a type appears more than once.
  const detectorSchemaByType = useMemo(() => {
    const map = new Map<string, RJSFSchema>();
    detectorSchemas.forEach(({ type, schema }) => {
      if (!map.has(type)) {
        map.set(type, schema);
      }
    });
    return map;
  }, [detectorSchemas]);
  const availableTypes = useMemo(
    () => detectorSchemas.map((entry) => entry.type),
    [detectorSchemas],
  );
  // Apply the configured typeOrder first, then append any remaining types in
  // their schema order.
  const orderedTypes = useMemo(() => {
    if (!typeOrder.length) {
      return availableTypes;
    }
    const availableSet = new Set(availableTypes);
    const ordered = typeOrder.filter((type) => availableSet.has(type));
    const orderedSet = new Set(ordered);
    const remaining = availableTypes.filter((type) => !orderedSet.has(type));
    return [...ordered, ...remaining];
  }, [availableTypes, typeOrder]);
  const formData = isJsonObject(rawFormData) ? rawFormData : {};
  const detectors = formData as JsonObject;
  // Local UI state: the pending "add" type, validation errors, in-progress
  // rename drafts, and which instance cards are expanded.
  const [addType, setAddType] = useState<string | undefined>(orderedTypes[0]);
  const [addError, setAddError] = useState<string | undefined>();
  const [renameDrafts, setRenameDrafts] = useState<Record<string, string>>({});
  const [renameErrors, setRenameErrors] = useState<Record<string, string>>({});
  const [typeErrors, setTypeErrors] = useState<Record<string, string>>({});
  const [openKeys, setOpenKeys] = useState<Set<string>>(
    () => new Set(Object.keys(detectors)),
  );
  // Keep the pending "add" type valid as the available types change.
  useEffect(() => {
    if (!orderedTypes.length) {
      setAddType(undefined);
      return;
    }
    if (!addType || !orderedTypes.includes(addType)) {
      setAddType(orderedTypes[0]);
    }
  }, [orderedTypes, addType]);
  // Prune per-key UI state (open cards, drafts, errors) for detector keys
  // that no longer exist in the form data.
  useEffect(() => {
    setOpenKeys((prev) => {
      const next = new Set<string>();
      Object.keys(detectors).forEach((key) => {
        if (prev.has(key)) {
          next.add(key);
        }
      });
      return next;
    });
    setRenameDrafts((prev) => {
      const next: Record<string, string> = {};
      Object.keys(detectors).forEach((key) => {
        if (prev[key] !== undefined) {
          next[key] = prev[key];
        }
      });
      return next;
    });
    setRenameErrors((prev) => {
      const next: Record<string, string> = {};
      Object.keys(detectors).forEach((key) => {
        if (prev[key] !== undefined) {
          next[key] = prev[key];
        }
      });
      return next;
    });
    setTypeErrors((prev) => {
      const next: Record<string, string> = {};
      Object.keys(detectors).forEach((key) => {
        if (prev[key] !== undefined) {
          next[key] = prev[key];
        }
      });
      return next;
    });
  }, [detectors]);
  // Push the full detectors map back to RJSF.
  const updateDetectors = useCallback(
    (nextDetectors: JsonObject) => {
      onChange(nextDetectors as unknown, [] as FieldPathList);
    },
    [onChange],
  );
  // Translated label/description for a detector type, falling back to a
  // humanized key (label) or empty string (description).
  const getTypeLabel = useCallback(
    (type: string) =>
      t(`${sectionPrefix}.${type}.label`, {
        ns: configNamespace,
        defaultValue: humanizeKey(type),
      }),
    [t, sectionPrefix, configNamespace],
  );
  const getTypeDescription = useCallback(
    (type: string) =>
      t(`${sectionPrefix}.${type}.description`, {
        ns: configNamespace,
        defaultValue: "",
      }),
    [t, sectionPrefix, configNamespace],
  );
  const isSingleInstanceType = useCallback(
    (type: string) => !multiInstanceSet.has(type),
    [multiInstanceSet],
  );
  // Seed a new instance with the variant schema's defaults; `type` is always
  // re-applied last so defaults cannot overwrite it.
  const getDetectorDefaults = useCallback(
    (type: string) => {
      const schema = detectorSchemaByType.get(type);
      if (!schema) {
        return { type };
      }
      const base = { type } as Record<string, unknown>;
      const withDefaults = applySchemaDefaults(schema, base);
      return { ...withDefaults, type } as Record<string, unknown>;
    },
    [detectorSchemaByType],
  );
  // True when some other instance (excluding excludeKey) already uses
  // targetType — i.e. adding/switching would create a duplicate.
  const resolveDuplicateType = useCallback(
    (targetType: string, excludeKey?: string) => {
      return Object.entries(detectors).some(([key, value]) => {
        if (excludeKey && key === excludeKey) {
          return false;
        }
        return getInstanceType(value) === targetType;
      });
    },
    [detectors],
  );
  // Add a new instance of the pending type. Keys collide-avoid by appending
  // a numeric suffix (cpu, cpu2, cpu3, ...); the new card opens expanded.
  const handleAdd = useCallback(() => {
    if (!addType) {
      setAddError(
        t("selectItem", {
          ns: "common",
          defaultValue: "Select {{item}}",
          item: t("detectors.type.label", {
            ns: configNamespace,
            defaultValue: "Type",
          }),
        }),
      );
      return;
    }
    if (isSingleInstanceType(addType) && resolveDuplicateType(addType)) {
      setAddError(
        t("configForm.detectors.singleType", {
          ns: "views/settings",
          defaultValue: "Only one {{type}} detector is allowed.",
          type: getTypeLabel(addType),
        }),
      );
      return;
    }
    const baseKey = addType;
    let nextKey = baseKey;
    let index = 2;
    while (Object.prototype.hasOwnProperty.call(detectors, nextKey)) {
      nextKey = `${baseKey}${index}`;
      index += 1;
    }
    const nextDetectors = {
      ...detectors,
      [nextKey]: getDetectorDefaults(addType),
    } as JsonObject;
    setAddError(undefined);
    setOpenKeys((prev) => {
      const next = new Set(prev);
      next.add(nextKey);
      return next;
    });
    updateDetectors(nextDetectors);
  }, [
    addType,
    t,
    configNamespace,
    detectors,
    getDetectorDefaults,
    getTypeLabel,
    isSingleInstanceType,
    resolveDuplicateType,
    updateDetectors,
  ]);
  const handleRemove = useCallback(
    (key: string) => {
      const { [key]: _, ...rest } = detectors;
      updateDetectors(rest as JsonObject);
      setOpenKeys((prev) => {
        const next = new Set(prev);
        next.delete(key);
        return next;
      });
    },
    [detectors, updateDetectors],
  );
  // Validate and apply a rename: the new key must be non-empty and unused.
  // On success the entry is moved and the open/draft state follows it.
  const commitRename = useCallback(
    (key: string, nextKey: string) => {
      const trimmed = nextKey.trim();
      if (!trimmed) {
        setRenameErrors((prev) => ({
          ...prev,
          [key]: t("configForm.detectors.keyRequired", {
            ns: "views/settings",
            defaultValue: "Detector name is required.",
          }),
        }));
        return;
      }
      if (trimmed !== key && detectors[trimmed] !== undefined) {
        setRenameErrors((prev) => ({
          ...prev,
          [key]: t("configForm.detectors.keyDuplicate", {
            ns: "views/settings",
            defaultValue: "Detector name already exists.",
          }),
        }));
        return;
      }
      setRenameErrors((prev) => {
        const { [key]: _, ...rest } = prev;
        return rest;
      });
      setRenameDrafts((prev) => {
        const { [key]: _, ...rest } = prev;
        return rest;
      });
      if (trimmed === key) {
        return;
      }
      const { [key]: value, ...rest } = detectors;
      const nextDetectors = { ...rest, [trimmed]: value } as JsonObject;
      setOpenKeys((prev) => {
        const next = new Set(prev);
        if (next.delete(key)) {
          next.add(trimmed);
        }
        return next;
      });
      updateDetectors(nextDetectors);
    },
    [detectors, t, updateDetectors],
  );
  // Switch an instance to another type. The instance value is RESET to the
  // new type's defaults (previous field values are discarded).
  const handleTypeChange = useCallback(
    (key: string, nextType: string) => {
      const currentType = getInstanceType(detectors[key]);
      if (!nextType || nextType === currentType) {
        return;
      }
      if (
        isSingleInstanceType(nextType) &&
        resolveDuplicateType(nextType, key)
      ) {
        setTypeErrors((prev) => ({
          ...prev,
          [key]: t("configForm.detectors.singleType", {
            ns: "views/settings",
            defaultValue: "Only one {{type}} detector is allowed.",
            type: getTypeLabel(nextType),
          }),
        }));
        return;
      }
      setTypeErrors((prev) => {
        const { [key]: _, ...rest } = prev;
        return rest;
      });
      const nextDetectors = {
        ...detectors,
        [key]: getDetectorDefaults(nextType),
      } as JsonObject;
      updateDetectors(nextDetectors);
    },
    [
      detectors,
      getDetectorDefaults,
      getTypeLabel,
      isSingleInstanceType,
      resolveDuplicateType,
      t,
      updateDetectors,
    ],
  );
  // Per-instance UiSchema: base additionalProperties uiSchema + globally
  // hidden paths + per-type hidden paths; `type` is always hidden because it
  // is managed by the type select above the form.
  const getInstanceUiSchema = useCallback(
    (type: string) => {
      const baseUiSchema =
        (uiSchema?.additionalProperties as UiSchema | undefined) ?? {};
      const globalHidden = buildHiddenUiSchema(globalHiddenFields);
      const hiddenOverrides = buildHiddenUiSchema(hiddenByType[type] ?? []);
      const typeHidden = { type: { "ui:widget": "hidden" } } as UiSchema;
      const withGlobalHidden = mergeUiSchema(baseUiSchema, globalHidden);
      const withTypeHidden = mergeUiSchema(withGlobalHidden, hiddenOverrides);
      return mergeUiSchema(withTypeHidden, typeHidden);
    },
    [globalHiddenFields, hiddenByType, uiSchema?.additionalProperties],
  );
  // Render the nested form for a single instance by delegating to the
  // registry's SchemaField with a per-key path, errors, and change handler.
  const renderInstanceForm = useCallback(
    (key: string, value: unknown) => {
      const SchemaField = registry.fields.SchemaField;
      const type = getInstanceType(value);
      const schema = type ? detectorSchemaByType.get(type) : undefined;
      if (!SchemaField || !schema || !type) {
        return null;
      }
      const instanceUiSchema = getInstanceUiSchema(type);
      const instanceFieldPathId = toFieldPathId(
        key,
        registry.globalFormOptions,
        fieldPathId.path,
      );
      const instanceErrorSchema = (
        errorSchema as Record<string, ErrorSchema> | undefined
      )?.[key];
      const handleInstanceChange = (
        nextValue: unknown,
        _path: FieldPathList,
        _errors?: ErrorSchema,
        _id?: string,
      ) => {
        const nextDetectors = {
          ...detectors,
          [key]: nextValue ?? {},
        } as JsonObject;
        updateDetectors(nextDetectors);
      };
      return (
        <SchemaField
          name={key}
          schema={schema}
          uiSchema={instanceUiSchema}
          fieldPathId={instanceFieldPathId}
          formData={value}
          errorSchema={instanceErrorSchema}
          onChange={handleInstanceChange}
          onBlur={onBlur}
          onFocus={onFocus}
          registry={registry}
          disabled={disabled}
          readonly={readonly}
          hideError={hideError}
        />
      );
    },
    [
      detectorSchemaByType,
      detectors,
      getInstanceUiSchema,
      disabled,
      errorSchema,
      fieldPathId,
      hideError,
      onBlur,
      onFocus,
      readonly,
      registry,
      updateDetectors,
    ],
  );
  // Nothing to render without at least one detector variant schema.
  if (!availableTypes.length) {
    return (
      <p className="text-sm text-muted-foreground">
        {t("configForm.detectors.noSchema", {
          ns: "views/settings",
          defaultValue: "No detector schemas are available.",
        })}
      </p>
    );
  }
  const detectorEntries = Object.entries(detectors);
  const isDisabled = Boolean(disabled || readonly);
  const addLabel = `${t("button.add", {
    ns: "common",
    defaultValue: "Add",
  })} ${t("detectors.label", {
    ns: configNamespace,
    defaultValue: "Detector hardware",
  })}`;
  return (
    <div className="space-y-4">
      {detectorEntries.length === 0 ? (
        <p className="text-sm text-muted-foreground">
          {t("configForm.detectors.none", {
            ns: "views/settings",
            defaultValue: "No detector instances configured.",
          })}
        </p>
      ) : (
        <div className="space-y-3">
          {/* One collapsible card per configured detector instance. */}
          {detectorEntries.map(([key, value]) => {
            const type = getInstanceType(value) ?? "";
            const typeLabel = type ? getTypeLabel(type) : key;
            const typeDescription = type ? getTypeDescription(type) : "";
            const isOpen = openKeys.has(key);
            const renameDraft = renameDrafts[key] ?? key;
            return (
              <div key={key} className="rounded-lg border bg-card">
                <Collapsible
                  open={isOpen}
                  onOpenChange={(open) => {
                    setOpenKeys((prev) => {
                      const next = new Set(prev);
                      if (open) {
                        next.add(key);
                      } else {
                        next.delete(key);
                      }
                      return next;
                    });
                  }}
                >
                  {/* Card header: expand toggle, type label + key, remove. */}
                  <div className="flex items-start justify-between gap-4 p-4">
                    <div className="flex items-start gap-3">
                      <CollapsibleTrigger asChild>
                        <Button
                          type="button"
                          variant="ghost"
                          size="xs"
                          className="mt-0.5"
                        >
                          {isOpen ? (
                            <LuChevronDown className="h-4 w-4" />
                          ) : (
                            <LuChevronRight className="h-4 w-4" />
                          )}
                        </Button>
                      </CollapsibleTrigger>
                      <div>
                        <div className="text-sm font-medium">
                          {typeLabel}
                          <span className="ml-2 text-xs text-muted-foreground">
                            {key}
                          </span>
                        </div>
                        {typeDescription && (
                          <div className="text-xs text-muted-foreground">
                            {typeDescription}
                          </div>
                        )}
                      </div>
                    </div>
                    <Button
                      type="button"
                      variant="ghost"
                      size="xs"
                      onClick={() => handleRemove(key)}
                      disabled={isDisabled}
                    >
                      <LuTrash2 className="h-4 w-4" />
                    </Button>
                  </div>
                  <CollapsibleContent>
                    <div className="space-y-4 border-t p-4">
                      <div className="grid gap-4 md:grid-cols-4">
                        {/* Rename input: commits on blur or Enter. */}
                        <div className="space-y-2">
                          <Label>
                            {t("label.ID", {
                              ns: "common",
                              defaultValue: "ID",
                            })}
                          </Label>
                          <Input
                            value={renameDraft}
                            disabled={isDisabled}
                            onChange={(event) => {
                              setRenameDrafts((prev) => ({
                                ...prev,
                                [key]: event.target.value,
                              }));
                            }}
                            onBlur={(event) =>
                              commitRename(key, event.target.value)
                            }
                            onKeyDown={(event) => {
                              if (event.key === "Enter") {
                                event.preventDefault();
                                commitRename(key, renameDraft);
                              }
                            }}
                          />
                          <p className="text-xs text-muted-foreground">
                            {t("field.internalID", {
                              ns: "common",
                              defaultValue:
                                "The Internal ID Frigate uses in the configuration and database",
                            })}
                          </p>
                          {renameErrors[key] && (
                            <p className="text-xs text-danger">
                              {renameErrors[key]}
                            </p>
                          )}
                        </div>
                        {/* Type select: switching resets to type defaults. */}
                        <div className="col-span-3 space-y-2">
                          <Label>
                            {t("detectors.type.label", {
                              ns: configNamespace,
                              defaultValue: "Type",
                            })}
                          </Label>
                          <Select
                            value={type}
                            onValueChange={(value) =>
                              handleTypeChange(key, value)
                            }
                            disabled={isDisabled}
                          >
                            <SelectTrigger className="w-full">
                              <SelectValue
                                placeholder={t("selectItem", {
                                  ns: "common",
                                  defaultValue: "Select {{item}}",
                                  item: t("detectors.type.label", {
                                    ns: configNamespace,
                                    defaultValue: "Type",
                                  }),
                                })}
                              />
                            </SelectTrigger>
                            <SelectContent>
                              {orderedTypes.map((option) => (
                                <SelectItem key={option} value={option}>
                                  {getTypeLabel(option)}
                                </SelectItem>
                              ))}
                            </SelectContent>
                          </Select>
                          {typeErrors[key] && (
                            <p className="text-xs text-danger">
                              {typeErrors[key]}
                            </p>
                          )}
                        </div>
                      </div>
                      {/* Nested per-type schema form. */}
                      <div
                        className={cn(
                          "rounded-md border border-border/70 bg-background p-0",
                          readonly && "opacity-90",
                        )}
                      >
                        {renderInstanceForm(key, value)}
                      </div>
                    </div>
                  </CollapsibleContent>
                </Collapsible>
              </div>
            );
          })}
        </div>
      )}
      {/* "Add detector" panel: pick a type, then add a new instance. */}
      <div className="flex justify-start">
        <div className="w-full max-w-md rounded-lg border bg-card p-4">
          <div className="text-sm font-medium text-muted-foreground">
            {addLabel}
          </div>
          <div className="mt-3 flex flex-col gap-3 md:flex-row md:items-end">
            <div className="flex-1 space-y-2">
              <Label>
                {t("detectors.type.label", {
                  ns: configNamespace,
                  defaultValue: "Type",
                })}
              </Label>
              <Select
                value={addType ?? ""}
                onValueChange={(value) => {
                  setAddError(undefined);
                  setAddType(value);
                }}
                disabled={isDisabled}
              >
                <SelectTrigger className="w-full">
                  <SelectValue
                    placeholder={t("selectItem", {
                      ns: "common",
                      defaultValue: "Select {{item}}",
                      item: t("detectors.type.label", {
                        ns: configNamespace,
                        defaultValue: "Type",
                      }),
                    })}
                  />
                </SelectTrigger>
                <SelectContent>
                  {orderedTypes.map((type) => (
                    <SelectItem key={type} value={type}>
                      {getTypeLabel(type)}
                    </SelectItem>
                  ))}
                </SelectContent>
              </Select>
              {addError && <p className="text-xs text-danger">{addError}</p>}
            </div>
            <div>
              <Button
                type="button"
                variant="outline"
                onClick={handleAdd}
                disabled={isDisabled}
                className="gap-2"
              >
                <LuPlus className="h-4 w-4" />
                {t("button.add", {
                  ns: "common",
                  defaultValue: "Add",
                })}
              </Button>
            </div>
          </div>
        </div>
      </div>
    </div>
  );
}

View File

@ -1,2 +1,3 @@
// Custom RJSF Fields
// Registered under `fields` in the form theme; schemas opt in via `ui:field`.
export { LayoutGridField } from "./LayoutGridField";
export { DetectorHardwareField } from "./DetectorHardwareField";

View File

@ -36,6 +36,7 @@ import { MultiSchemaFieldTemplate } from "./templates/MultiSchemaFieldTemplate";
import { WrapIfAdditionalTemplate } from "./templates/WrapIfAdditionalTemplate";
import { LayoutGridField } from "./fields/LayoutGridField";
import { DetectorHardwareField } from "./fields/DetectorHardwareField";
export interface FrigateTheme {
widgets: RegistryWidgetsType;
@ -79,5 +80,6 @@ export const frigateTheme: FrigateTheme = {
},
fields: {
LayoutGridField: LayoutGridField,
DetectorHardwareField: DetectorHardwareField,
},
};

View File

@ -126,7 +126,11 @@ export function FieldTemplate(props: FieldTemplateProps) {
!isAdditionalProperty &&
!isArrayItemInAdditionalProp;
const translationPath = buildTranslationPath(pathSegments, sectionI18nPrefix);
const translationPath = buildTranslationPath(
pathSegments,
sectionI18nPrefix,
formContext,
);
const filterObjectLabel = getFilterObjectLabel(pathSegments);
const translatedFilterObjectLabel = filterObjectLabel
? getTranslatedLabel(filterObjectLabel, "object")

View File

@ -88,7 +88,11 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
? getTranslatedLabel(filterObjectLabel, "object")
: undefined;
if (path) {
translationPath = buildTranslationPath(path);
translationPath = buildTranslationPath(
path,
sectionI18nPrefix,
formContext,
);
// Also get the last property name for fallback label generation
for (let i = path.length - 1; i >= 0; i -= 1) {
const segment = path[i];

View File

@ -5,6 +5,46 @@
* for RJSF form fields.
*/
import type { ConfigFormContext } from "@/types/configForm";
// Type guard: true for any non-null object value (arrays included, per
// `typeof` semantics).
const isRecord = (value: unknown): value is Record<string, unknown> => {
  if (value === null) {
    return false;
  }
  return typeof value === "object";
};
// Look up a detector entry by key in a detectors map and return its `type`
// discriminator, or undefined when the key is missing, either value is not
// an object, or the type is empty/non-string.
const resolveDetectorType = (
  detectorConfig: unknown,
  detectorKey?: string,
): string | undefined => {
  if (!detectorKey) {
    return undefined;
  }
  if (typeof detectorConfig !== "object" || detectorConfig === null) {
    return undefined;
  }
  const entry = (detectorConfig as Record<string, unknown>)[detectorKey];
  if (typeof entry !== "object" || entry === null) {
    return undefined;
  }
  const typeValue = (entry as Record<string, unknown>).type;
  return typeof typeValue === "string" && typeValue.length > 0
    ? typeValue
    : undefined;
};
// Resolve a detector's type from the form context: prefer a nested
// `detectors` map when present, otherwise treat formData itself as the map.
const resolveDetectorTypeFromContext = (
  formContext: ConfigFormContext | undefined,
  detectorKey?: string,
): string | undefined => {
  if (!detectorKey) {
    return undefined;
  }
  const formData = formContext?.formData;
  if (!isRecord(formData)) {
    return undefined;
  }
  const container = isRecord(formData.detectors)
    ? formData.detectors
    : formData;
  return resolveDetectorType(container, detectorKey);
};
/**
* Build the i18n translation key path for nested fields using the field path
* provided by RJSF. This avoids ambiguity with underscores in field names and
@ -12,16 +52,18 @@
*
* @param segments Array of path segments (strings and/or numbers)
* @param sectionI18nPrefix Optional section prefix for specialized sections
* @param formContext Optional form context for resolving detector types
* @returns Normalized translation key path as a dot-separated string
*
* @example
* buildTranslationPath(["filters", "person", "threshold"]) => "filters.threshold"
* buildTranslationPath(["detectors", "ov1", "type"]) => "detectors.type"
* buildTranslationPath(["model", "type"], "detectors") => "type"
* buildTranslationPath(["detectors", "ov1", "type"]) => "detectors.openvino.type"
* buildTranslationPath(["ov1", "type"], "detectors") => "openvino.type"
*/
export function buildTranslationPath(
segments: Array<string | number>,
sectionI18nPrefix?: string,
formContext?: ConfigFormContext,
): string {
// Filter out numeric indices to get string segments only
const stringSegments = segments.filter(
@ -39,10 +81,24 @@ export function buildTranslationPath(
return normalized.join(".");
}
// Handle detectors section - skip the dynamic detector name
// Example: detectors.ov1.type -> detectors.type
// Handle detectors section - resolve the detector type when available
// Example: detectors.ov1.type -> detectors.openvino.type
const detectorsIndex = stringSegments.indexOf("detectors");
if (detectorsIndex !== -1 && stringSegments.length > detectorsIndex + 2) {
const detectorKey = stringSegments[detectorsIndex + 1];
const detectorType = resolveDetectorTypeFromContext(
formContext,
detectorKey,
);
if (detectorType) {
const normalized = [
...stringSegments.slice(0, detectorsIndex + 1),
detectorType,
...stringSegments.slice(detectorsIndex + 2),
];
return normalized.join(".");
}
const normalized = [
...stringSegments.slice(0, detectorsIndex + 1),
...stringSegments.slice(detectorsIndex + 2),
@ -51,8 +107,17 @@ export function buildTranslationPath(
}
// Handle specialized sections like detectors where the first segment is dynamic
// Example: (sectionI18nPrefix="detectors") "ov1.type" -> "type"
// Example: (sectionI18nPrefix="detectors") "ov1.type" -> "openvino.type"
if (sectionI18nPrefix === "detectors" && stringSegments.length > 1) {
const detectorKey = stringSegments[0];
const detectorType = resolveDetectorTypeFromContext(
formContext,
detectorKey,
);
if (detectorType) {
return [detectorType, ...stringSegments.slice(1)].join(".");
}
return stringSegments.slice(1).join(".");
}