Settings i18n improvements (#22571)

* i18n improvements for settings UI

- deduplicate shared detector translation keys and centralize config translation resolution
- add missing i18n keys

* formatting
This commit is contained in:
Josh Hawkins 2026-03-22 13:03:24 -05:00 committed by GitHub
parent 74c89beaf9
commit b6c03c99de
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 217 additions and 809 deletions

View File

@ -190,20 +190,24 @@ def generate_section_translation(config_class: type) -> Dict[str, Any]:
def get_detector_translations( def get_detector_translations(
config_schema: Dict[str, Any], config_schema: Dict[str, Any],
) -> tuple[Dict[str, Any], set[str]]: ) -> tuple[Dict[str, Any], Dict[str, Any], set[str]]:
"""Build detector type translations with nested fields based on schema definitions.""" """Build detector type translations with nested fields based on schema definitions.
Returns a tuple of (type_translations, shared_fields, nested_field_keys).
Shared fields (identical across all detector types) are returned separately
to avoid duplication in the output.
"""
defs = config_schema.get("$defs", {}) defs = config_schema.get("$defs", {})
detector_schema = defs.get("DetectorConfig", {}) detector_schema = defs.get("DetectorConfig", {})
discriminator = detector_schema.get("discriminator", {}) discriminator = detector_schema.get("discriminator", {})
mapping = discriminator.get("mapping", {}) mapping = discriminator.get("mapping", {})
type_translations: Dict[str, Any] = {} # First pass: collect all nested fields per detector type
nested_field_keys: set[str] = set() all_nested: Dict[str, Dict[str, Any]] = {}
for detector_type, ref in mapping.items(): type_meta: Dict[str, Dict[str, str]] = {}
if not isinstance(ref, str):
continue
if not ref.startswith("#/$defs/"): for detector_type, ref in mapping.items():
if not isinstance(ref, str) or not ref.startswith("#/$defs/"):
continue continue
ref_name = ref.split("/")[-1] ref_name = ref.split("/")[-1]
@ -211,26 +215,49 @@ def get_detector_translations(
if not ref_schema: if not ref_schema:
continue continue
type_entry: Dict[str, str] = {} meta: Dict[str, str] = {}
title = ref_schema.get("title") title = ref_schema.get("title")
description = ref_schema.get("description") description = ref_schema.get("description")
if title: if title:
type_entry["label"] = title meta["label"] = title
if description: if description:
type_entry["description"] = description meta["description"] = description
type_meta[detector_type] = meta
nested = extract_translations_from_schema(ref_schema, defs=defs) nested = extract_translations_from_schema(ref_schema, defs=defs)
nested_without_root = { all_nested[detector_type] = {
k: v for k, v in nested.items() if k not in ("label", "description") k: v for k, v in nested.items() if k not in ("label", "description")
} }
if nested_without_root:
type_entry.update(nested_without_root) # Find fields that are present in every detector type and identical across all of them
nested_field_keys.update(nested_without_root.keys()) shared_fields: Dict[str, Any] = {}
if all_nested:
# Collect all field keys across all types
all_keys: set[str] = set()
for nested in all_nested.values():
all_keys.update(nested.keys())
for key in all_keys:
values = [nested[key] for nested in all_nested.values() if key in nested]
if len(values) == len(all_nested) and all(v == values[0] for v in values):
shared_fields[key] = values[0]
# Build per-type translations with only unique (non-shared) fields
type_translations: Dict[str, Any] = {}
nested_field_keys: set[str] = set()
for detector_type, nested in all_nested.items():
type_entry: Dict[str, Any] = {}
type_entry.update(type_meta.get(detector_type, {}))
unique_fields = {k: v for k, v in nested.items() if k not in shared_fields}
if unique_fields:
type_entry.update(unique_fields)
nested_field_keys.update(unique_fields.keys())
if type_entry: if type_entry:
type_translations[detector_type] = type_entry type_translations[detector_type] = type_entry
return type_translations, nested_field_keys return type_translations, shared_fields, nested_field_keys
def main(): def main():
@ -303,9 +330,12 @@ def main():
section_data.update(nested_without_root) section_data.update(nested_without_root)
if field_name == "detectors": if field_name == "detectors":
detector_types, detector_field_keys = get_detector_translations( detector_types, shared_fields, detector_field_keys = (
config_schema get_detector_translations(config_schema)
) )
# Add shared fields at the base detectors level
section_data.update(shared_fields)
# Add per-type translations (only unique fields per type)
section_data.update(detector_types) section_data.update(detector_types)
for key in detector_field_keys: for key in detector_field_keys:
if key == "type": if key == "type":

View File

@ -287,118 +287,63 @@
"label": "Detector hardware", "label": "Detector hardware",
"description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.", "description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
"type": { "type": {
"label": "Detector Type", "label": "Type"
"description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')." },
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}, },
"axengine": { "axengine": {
"label": "AXEngine NPU", "label": "AXEngine NPU",
"description": "AXERA AX650N/AX8850N NPU detector running compiled .axmodel files via the AXEngine runtime.", "description": "AXERA AX650N/AX8850N NPU detector running compiled .axmodel files via the AXEngine runtime."
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
}, },
"cpu": { "cpu": {
"label": "CPU", "label": "CPU",
"description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.", "description": "CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"num_threads": { "num_threads": {
"label": "Number of detection threads", "label": "Number of detection threads",
"description": "The number of threads used for CPU-based inference." "description": "The number of threads used for CPU-based inference."
@ -407,57 +352,6 @@
"deepstack": { "deepstack": {
"label": "DeepStack", "label": "DeepStack",
"description": "DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended.", "description": "DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"api_url": { "api_url": {
"label": "DeepStack API URL", "label": "DeepStack API URL",
"description": "The URL of the DeepStack API." "description": "The URL of the DeepStack API."
@ -474,57 +368,6 @@
"degirum": { "degirum": {
"label": "DeGirum", "label": "DeGirum",
"description": "DeGirum detector for running models via DeGirum cloud or local inference services.", "description": "DeGirum detector for running models via DeGirum cloud or local inference services.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"location": { "location": {
"label": "Inference Location", "label": "Inference Location",
"description": "Location of the DeGirim inference engine (e.g. '@cloud', '127.0.0.1')." "description": "Location of the DeGirim inference engine (e.g. '@cloud', '127.0.0.1')."
@ -541,57 +384,6 @@
"edgetpu": { "edgetpu": {
"label": "EdgeTPU", "label": "EdgeTPU",
"description": "EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.", "description": "EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": { "device": {
"label": "Device Type", "label": "Device Type",
"description": "The device to use for EdgeTPU inference (e.g. 'usb', 'pci')." "description": "The device to use for EdgeTPU inference (e.g. 'usb', 'pci')."
@ -600,57 +392,6 @@
"hailo8l": { "hailo8l": {
"label": "Hailo-8/Hailo-8L", "label": "Hailo-8/Hailo-8L",
"description": "Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.", "description": "Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": { "device": {
"label": "Device Type", "label": "Device Type",
"description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')." "description": "The device to use for Hailo inference (e.g. 'PCIe', 'M.2')."
@ -659,57 +400,6 @@
"memryx": { "memryx": {
"label": "MemryX", "label": "MemryX",
"description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.", "description": "MemryX MX3 detector that runs compiled DFP models on MemryX accelerators.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": { "device": {
"label": "Device Path", "label": "Device Path",
"description": "The device to use for MemryX inference (e.g. 'PCIe')." "description": "The device to use for MemryX inference (e.g. 'PCIe')."
@ -718,57 +408,6 @@
"onnx": { "onnx": {
"label": "ONNX", "label": "ONNX",
"description": "ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.", "description": "ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": { "device": {
"label": "Device Type", "label": "Device Type",
"description": "The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU')." "description": "The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU')."
@ -777,57 +416,6 @@
"openvino": { "openvino": {
"label": "OpenVINO", "label": "OpenVINO",
"description": "OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.", "description": "OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": { "device": {
"label": "Device Type", "label": "Device Type",
"description": "The device to use for OpenVINO inference (e.g. 'CPU', 'GPU', 'NPU')." "description": "The device to use for OpenVINO inference (e.g. 'CPU', 'GPU', 'NPU')."
@ -836,57 +424,6 @@
"rknn": { "rknn": {
"label": "RKNN", "label": "RKNN",
"description": "RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.", "description": "RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"num_cores": { "num_cores": {
"label": "Number of NPU cores to use.", "label": "Number of NPU cores to use.",
"description": "The number of NPU cores to use (0 for auto)." "description": "The number of NPU cores to use (0 for auto)."
@ -894,168 +431,15 @@
}, },
"synaptics": { "synaptics": {
"label": "Synaptics", "label": "Synaptics",
"description": "Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware.", "description": "Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware."
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
}, },
"teflon_tfl": { "teflon_tfl": {
"label": "Teflon", "label": "Teflon",
"description": "Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs.", "description": "Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs."
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
}, },
"tensorrt": { "tensorrt": {
"label": "TensorRT", "label": "TensorRT",
"description": "TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.", "description": "TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"device": { "device": {
"label": "GPU Device Index", "label": "GPU Device Index",
"description": "The GPU device index to use." "description": "The GPU device index to use."
@ -1064,57 +448,6 @@
"zmq": { "zmq": {
"label": "ZMQ IPC", "label": "ZMQ IPC",
"description": "ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.", "description": "ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint.",
"type": {
"label": "Type"
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
},
"endpoint": { "endpoint": {
"label": "ZMQ IPC endpoint", "label": "ZMQ IPC endpoint",
"description": "The ZMQ endpoint to connect to." "description": "The ZMQ endpoint to connect to."

View File

@ -116,5 +116,10 @@
"nzpost": "NZPost", "nzpost": "NZPost",
"postnord": "PostNord", "postnord": "PostNord",
"gls": "GLS", "gls": "GLS",
"dpd": "DPD" "dpd": "DPD",
"canada_post": "Canada Post",
"royal_mail": "Royal Mail",
"school_bus": "School Bus",
"skunk": "Skunk",
"kangaroo": "Kangaroo"
} }

View File

@ -92,6 +92,7 @@
"triggers": "Triggers", "triggers": "Triggers",
"debug": "Debug", "debug": "Debug",
"frigateplus": "Frigate+", "frigateplus": "Frigate+",
"maintenance": "Maintenance",
"mediaSync": "Media sync", "mediaSync": "Media sync",
"regionGrid": "Region grid" "regionGrid": "Region grid"
}, },

View File

@ -75,7 +75,9 @@ export default function CameraReviewStatusToggles({
/> />
<div className="space-y-0.5"> <div className="space-y-0.5">
<Label htmlFor="detections-enabled"> <Label htmlFor="detections-enabled">
<Trans ns="views/settings">camera.review.detections</Trans> <Trans ns="views/settings">
cameraReview.review.detections
</Trans>
</Label> </Label>
</div> </div>
</div> </div>

View File

@ -1136,7 +1136,7 @@ export function ConfigSection({
)} )}
{hasChanges && ( {hasChanges && (
<Badge variant="outline" className="text-xs"> <Badge variant="outline" className="text-xs">
{t("modified", { {t("button.modified", {
ns: "common", ns: "common",
defaultValue: "Modified", defaultValue: "Modified",
})} })}
@ -1210,7 +1210,10 @@ export function ConfigSection({
variant="secondary" variant="secondary"
className="cursor-default bg-danger text-xs text-white hover:bg-danger" className="cursor-default bg-danger text-xs text-white hover:bg-danger"
> >
{t("modified", { ns: "common", defaultValue: "Modified" })} {t("button.modified", {
ns: "common",
defaultValue: "Modified",
})}
</Badge> </Badge>
)} )}
</div> </div>

View File

@ -7,7 +7,11 @@ import type {
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"; import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert";
import { LuCircleAlert } from "react-icons/lu"; import { LuCircleAlert } from "react-icons/lu";
import { useTranslation } from "react-i18next"; import { useTranslation } from "react-i18next";
import { buildTranslationPath, humanizeKey } from "../utils"; import {
buildTranslationPath,
resolveConfigTranslation,
humanizeKey,
} from "../utils";
import type { ConfigFormContext } from "@/types/configForm"; import type { ConfigFormContext } from "@/types/configForm";
type ErrorSchemaNode = RJSFSchema & { type ErrorSchemaNode = RJSFSchema & {
@ -114,22 +118,15 @@ const resolveErrorFieldLabel = ({
); );
if (effectiveNamespace && translationPath) { if (effectiveNamespace && translationPath) {
const prefixedTranslationKey = const translated = resolveConfigTranslation(
sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) i18n,
? `${sectionI18nPrefix}.${translationPath}.label` t,
: undefined; translationPath,
const translationKey = `${translationPath}.label`; "label",
sectionI18nPrefix,
if ( effectiveNamespace,
prefixedTranslationKey && );
i18n.exists(prefixedTranslationKey, { ns: effectiveNamespace }) if (translated) return translated;
) {
return t(prefixedTranslationKey, { ns: effectiveNamespace });
}
if (i18n.exists(translationKey, { ns: effectiveNamespace })) {
return t(translationKey, { ns: effectiveNamespace });
}
} }
const schemaNode = resolveSchemaNodeForPath(schema, segments); const schemaNode = resolveSchemaNodeForPath(schema, segments);

View File

@ -20,6 +20,7 @@ import { requiresRestartForFieldPath } from "@/utils/configUtil";
import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator"; import RestartRequiredIndicator from "@/components/indicators/RestartRequiredIndicator";
import { import {
buildTranslationPath, buildTranslationPath,
resolveConfigTranslation,
getFilterObjectLabel, getFilterObjectLabel,
hasOverrideAtPath, hasOverrideAtPath,
humanizeKey, humanizeKey,
@ -219,20 +220,16 @@ export function FieldTemplate(props: FieldTemplateProps) {
// Try to get translated label, falling back to schema title, then RJSF label // Try to get translated label, falling back to schema title, then RJSF label
let finalLabel = label; let finalLabel = label;
if (effectiveNamespace && translationPath) { if (effectiveNamespace && translationPath) {
// Prefer camera-scoped translations when a section prefix is provided const translatedLabel = resolveConfigTranslation(
const prefixedTranslationKey = i18n,
sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) t,
? `${sectionI18nPrefix}.${translationPath}.label` translationPath,
: undefined; "label",
const translationKey = `${translationPath}.label`; sectionI18nPrefix,
effectiveNamespace,
if ( );
prefixedTranslationKey && if (translatedLabel) {
i18n.exists(prefixedTranslationKey, { ns: effectiveNamespace }) finalLabel = translatedLabel;
) {
finalLabel = t(prefixedTranslationKey, { ns: effectiveNamespace });
} else if (i18n.exists(translationKey, { ns: effectiveNamespace })) {
finalLabel = t(translationKey, { ns: effectiveNamespace });
} else if (schemaTitle) { } else if (schemaTitle) {
finalLabel = schemaTitle; finalLabel = schemaTitle;
} else if (translatedFilterObjectLabel) { } else if (translatedFilterObjectLabel) {
@ -330,18 +327,16 @@ export function FieldTemplate(props: FieldTemplateProps) {
// Try to get translated description, falling back to schema description // Try to get translated description, falling back to schema description
let finalDescription = description || ""; let finalDescription = description || "";
if (effectiveNamespace && translationPath) { if (effectiveNamespace && translationPath) {
const prefixedDescriptionKey = const translatedDescription = resolveConfigTranslation(
sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) i18n,
? `${sectionI18nPrefix}.${translationPath}.description` t,
: undefined; translationPath,
const descriptionKey = `${translationPath}.description`; "description",
if ( sectionI18nPrefix,
prefixedDescriptionKey && effectiveNamespace,
i18n.exists(prefixedDescriptionKey, { ns: effectiveNamespace }) );
) { if (translatedDescription) {
finalDescription = t(prefixedDescriptionKey, { ns: effectiveNamespace }); finalDescription = translatedDescription;
} else if (i18n.exists(descriptionKey, { ns: effectiveNamespace })) {
finalDescription = t(descriptionKey, { ns: effectiveNamespace });
} else if (schemaDescription) { } else if (schemaDescription) {
finalDescription = schemaDescription; finalDescription = schemaDescription;
} }

View File

@ -17,6 +17,7 @@ import { requiresRestartForFieldPath } from "@/utils/configUtil";
import { ConfigFormContext } from "@/types/configForm"; import { ConfigFormContext } from "@/types/configForm";
import { import {
buildTranslationPath, buildTranslationPath,
resolveConfigTranslation,
getDomainFromNamespace, getDomainFromNamespace,
getFilterObjectLabel, getFilterObjectLabel,
humanizeKey, humanizeKey,
@ -263,16 +264,14 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
let inferredLabel: string | undefined; let inferredLabel: string | undefined;
if (i18nNs && translationPath) { if (i18nNs && translationPath) {
const prefixedLabelKey = inferredLabel = resolveConfigTranslation(
sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) i18n,
? `${sectionI18nPrefix}.${translationPath}.label` t,
: undefined; translationPath,
const labelKey = `${translationPath}.label`; "label",
if (prefixedLabelKey && i18n.exists(prefixedLabelKey, { ns: i18nNs })) { sectionI18nPrefix,
inferredLabel = t(prefixedLabelKey, { ns: i18nNs }); i18nNs,
} else if (i18n.exists(labelKey, { ns: i18nNs })) { );
inferredLabel = t(labelKey, { ns: i18nNs });
}
} }
if (!inferredLabel && translatedFilterLabel) { if (!inferredLabel && translatedFilterLabel) {
inferredLabel = translatedFilterLabel; inferredLabel = translatedFilterLabel;
@ -286,19 +285,14 @@ export function ObjectFieldTemplate(props: ObjectFieldTemplateProps) {
let inferredDescription: string | undefined; let inferredDescription: string | undefined;
if (i18nNs && translationPath) { if (i18nNs && translationPath) {
const prefixedDescriptionKey = inferredDescription = resolveConfigTranslation(
sectionI18nPrefix && !translationPath.startsWith(`${sectionI18nPrefix}.`) i18n,
? `${sectionI18nPrefix}.${translationPath}.description` t,
: undefined; translationPath,
const descriptionKey = `${translationPath}.description`; "description",
if ( sectionI18nPrefix,
prefixedDescriptionKey && i18nNs,
i18n.exists(prefixedDescriptionKey, { ns: i18nNs }) );
) {
inferredDescription = t(prefixedDescriptionKey, { ns: i18nNs });
} else if (i18n.exists(descriptionKey, { ns: i18nNs })) {
inferredDescription = t(descriptionKey, { ns: i18nNs });
}
} }
const schemaDescription = schema?.description; const schemaDescription = schema?.description;
const fallbackDescription = const fallbackDescription =

View File

@ -124,6 +124,50 @@ export function buildTranslationPath(
return stringSegments.join("."); return stringSegments.join(".");
} }
/**
* Resolve a translated label or description for a config form field.
*
* Tries keys in priority order:
* 1. Type-specific prefixed key (e.g. "detectors.edgetpu.device.label")
* 2. Shared prefixed key with type stripped (e.g. "detectors.device.label")
* 3. Unprefixed key (e.g. "device.label")
*
* @returns The translated string, or undefined if no key matched.
*/
export function resolveConfigTranslation(
i18n: { exists: (key: string, opts?: Record<string, unknown>) => boolean },
t: (key: string, opts?: Record<string, unknown>) => string,
translationPath: string,
suffix: "label" | "description",
sectionI18nPrefix?: string,
ns?: string,
): string | undefined {
const opts = ns ? { ns } : undefined;
if (
sectionI18nPrefix &&
!translationPath.startsWith(`${sectionI18nPrefix}.`)
) {
// 1. Type-specific prefixed key (e.g. detectors.edgetpu.device.label)
const prefixed = `${sectionI18nPrefix}.${translationPath}.${suffix}`;
if (i18n.exists(prefixed, opts)) return t(prefixed, opts);
// 2. Shared prefixed key — strip leading type segment
// e.g. detectors.edgetpu.model.path → detectors.model.path
const dot = translationPath.indexOf(".");
if (dot !== -1) {
const shared = `${sectionI18nPrefix}.${translationPath.substring(dot + 1)}.${suffix}`;
if (i18n.exists(shared, opts)) return t(shared, opts);
}
}
// 3. Unprefixed key
const base = `${translationPath}.${suffix}`;
if (i18n.exists(base, opts)) return t(base, opts);
return undefined;
}
/** /**
* Extract the filter object label from a path containing "filters" segment. * Extract the filter object label from a path containing "filters" segment.
* Returns the segment immediately after "filters". * Returns the segment immediately after "filters".

View File

@ -4,6 +4,7 @@
export { export {
buildTranslationPath, buildTranslationPath,
resolveConfigTranslation,
getFilterObjectLabel, getFilterObjectLabel,
humanizeKey, humanizeKey,
getDomainFromNamespace, getDomainFromNamespace,

View File

@ -207,7 +207,10 @@ export function SingleSectionPage({
variant="secondary" variant="secondary"
className="cursor-default bg-danger text-xs text-white hover:bg-danger" className="cursor-default bg-danger text-xs text-white hover:bg-danger"
> >
{t("modified", { ns: "common", defaultValue: "Modified" })} {t("button.modified", {
ns: "common",
defaultValue: "Modified",
})}
</Badge> </Badge>
)} )}
</div> </div>
@ -242,7 +245,7 @@ export function SingleSectionPage({
variant="secondary" variant="secondary"
className="cursor-default bg-danger text-xs text-white hover:bg-danger" className="cursor-default bg-danger text-xs text-white hover:bg-danger"
> >
{t("modified", { ns: "common", defaultValue: "Modified" })} {t("button.modified", { ns: "common", defaultValue: "Modified" })}
</Badge> </Badge>
)} )}
</div> </div>