add descriptions to all fields for i18n

This commit is contained in:
Josh Hawkins 2026-01-24 08:54:56 -06:00
parent 0280c2ec43
commit bd0bafc05e
34 changed files with 925 additions and 515 deletions

View File

@ -1,24 +1,32 @@
{
"label": "Global Audio events configuration.",
"label": "Global Audio events configuration",
"description": "Global settings for audio-based event detection; camera-level settings can override these.",
"enabled": {
"label": "Enable audio events."
"label": "Enable audio events",
"description": "Enable or disable audio event detection globally. Can be overridden per camera."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event."
"label": "Seconds of not hearing the type of audio to end the event",
"description": "Amount of seconds without the configured audio type before the audio event is ended."
},
"min_volume": {
"label": "Min volume required to run audio detection."
"label": "Min volume required to run audio detection",
"description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)."
},
"listen": {
"label": "Audio to listen for."
"label": "Audio to listen for",
"description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)."
},
"filters": {
"label": "Audio filters."
"label": "Audio filters",
"description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection."
"label": "Keep track of original state of audio detection",
"description": "Indicates whether audio detection was originally enabled in the static config file."
},
"num_threads": {
"label": "Number of detection threads"
"label": "Number of detection threads",
"description": "Number of threads to use for audio detection processing."
}
}
}

View File

@ -1,18 +1,24 @@
{
"label": "Audio transcription config.",
"label": "Audio transcription config",
"description": "Settings for live and speech audio transcription used for events and live captions.",
"enabled": {
"label": "Enable audio transcription."
"label": "Enable audio transcription",
"description": "Enable or disable automatic audio transcription globally."
},
"language": {
"label": "Language abbreviation to use for audio event transcription/translation."
"label": "Language abbreviation to use for audio event transcription/translation",
"description": "Language code used for transcription/translation (for example 'en' for English)."
},
"device": {
"label": "The device used for audio transcription."
"label": "The device used for audio transcription",
"description": "Device key (CPU/GPU) to run the transcription model on."
},
"model_size": {
"label": "The size of the embeddings model used."
"label": "The size of the embeddings model used",
"description": "Model size to use for transcription; smaller models run on CPU, larger models may need GPU."
},
"live_enabled": {
"label": "Enable live transcriptions."
"label": "Enable live transcriptions",
"description": "Enable streaming live transcription for audio as it is received."
}
}
}

View File

@ -1,37 +1,48 @@
{
"label": "Auth configuration.",
"label": "Auth configuration",
"description": "Authentication and session-related settings including cookie and rate limit options.",
"enabled": {
"label": "Enable authentication"
"label": "Enable authentication",
"description": "Enable native authentication for the Frigate UI."
},
"reset_admin_password": {
"label": "Reset the admin password on startup"
"label": "Reset the admin password on startup",
"description": "If true, reset the admin user's password on startup and print the new password in logs."
},
"cookie_name": {
"label": "Name for jwt token cookie"
"label": "Name for jwt token cookie",
"description": "Name of the cookie used to store the JWT token for native authentication."
},
"cookie_secure": {
"label": "Set secure flag on cookie"
"label": "Set secure flag on cookie",
"description": "Set the secure flag on the auth cookie; should be true when using TLS."
},
"session_length": {
"label": "Session length for jwt session tokens"
"label": "Session length for jwt session tokens",
"description": "Session duration in seconds for JWT-based sessions."
},
"refresh_time": {
"label": "Refresh the session if it is going to expire in this many seconds"
"label": "Refresh the session if it is going to expire in this many seconds",
"description": "When a session is within this many seconds of expiring, refresh it back to full length."
},
"failed_login_rate_limit": {
"label": "Rate limits for failed login attempts."
"label": "Rate limits for failed login attempts",
"description": "Rate limiting rules for failed login attempts to reduce brute-force attacks."
},
"trusted_proxies": {
"label": "Trusted proxies for determining IP address to rate limit"
"label": "Trusted proxies for determining IP address to rate limit",
"description": "List of trusted proxy IPs used when determining client IP for rate limiting."
},
"hash_iterations": {
"label": "Password hash iterations"
"label": "Password hash iterations",
"description": "Number of PBKDF2-SHA256 iterations to use when hashing user passwords."
},
"roles": {
"label": "Role to camera mappings. Empty list grants access to all cameras."
"label": "Role to camera mappings. Empty list grants access to all cameras",
"description": "Map roles to camera lists. An empty list grants access to all cameras for the role."
},
"admin_first_time_login": {
"label": "Internal field to expose first-time admin login flag to the UI",
"description": "When true, the UI may show a help link on the login page informing users how to sign in after an admin password reset."
}
}
}

View File

@ -1,36 +1,48 @@
{
"label": "Birdseye configuration.",
"label": "Birdseye configuration",
"description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
"enabled": {
"label": "Enable birdseye view."
"label": "Enable birdseye view",
"description": "Enable or disable the Birdseye view feature."
},
"mode": {
"label": "Tracking mode."
"label": "Tracking mode",
"description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'."
},
"restream": {
"label": "Restream birdseye via RTSP."
"label": "Restream birdseye via RTSP",
"description": "Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously."
},
"width": {
"label": "Birdseye width."
"label": "Birdseye width",
"description": "Output width (pixels) of the composed Birdseye frame."
},
"height": {
"label": "Birdseye height."
"label": "Birdseye height",
"description": "Output height (pixels) of the composed Birdseye frame."
},
"quality": {
"label": "Encoding quality."
"label": "Encoding quality",
"description": "Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest)."
},
"inactivity_threshold": {
"label": "Birdseye Inactivity Threshold"
"label": "Birdseye Inactivity Threshold",
"description": "Seconds of inactivity after which a camera will stop being shown in Birdseye."
},
"layout": {
"label": "Birdseye Layout Config",
"description": "Layout options for the Birdseye composition.",
"scaling_factor": {
"label": "Birdseye Scaling Factor"
"label": "Birdseye Scaling Factor",
"description": "Scaling factor used by the layout calculator (range 1.0 to 5.0)."
},
"max_cameras": {
"label": "Max cameras"
"label": "Max cameras",
"description": "Maximum number of cameras to display at once in Birdseye; shows the most recent cameras."
}
},
"idle_heartbeat_fps": {
"label": "Idle heartbeat FPS (0 disables, max 10)"
"label": "Idle heartbeat FPS (0 disables, max 10)",
"description": "Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable."
}
}
}

View File

@ -1,12 +1,16 @@
{
"label": "Camera group configuration",
"description": "Configuration for named camera groups used to organize cameras in the UI.",
"cameras": {
"label": "List of cameras in this group."
"label": "List of cameras in this group",
"description": "Array of camera names included in this group."
},
"icon": {
"label": "Icon that represents camera group."
"label": "Icon that represents camera group",
"description": "Icon used to represent the camera group in the UI."
},
"order": {
"label": "Sort order for group."
"label": "Sort order for group",
"description": "Numeric order used to sort camera groups in the UI; larger numbers appear later."
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,49 +1,64 @@
{
"label": "Object classification config.",
"label": "Object classification config",
"description": "Settings for classification models used to refine object labels or state classification.",
"bird": {
"label": "Bird classification config.",
"label": "Bird classification config",
"description": "Settings specific to bird classification models.",
"enabled": {
"label": "Enable bird classification."
"label": "Enable bird classification",
"description": "Enable or disable bird classification."
},
"threshold": {
"label": "Minimum classification score required to be considered a match."
"label": "Minimum classification score required to be considered a match",
"description": "Minimum classification score required to accept a bird classification."
}
},
"custom": {
"label": "Custom Classification Model Configs.",
"label": "Custom Classification Model Configs",
"description": "Configuration for custom classification models used for objects or state detection.",
"enabled": {
"label": "Enable running the model."
"label": "Enable running the model",
"description": "Enable or disable the custom classification model."
},
"name": {
"label": "Name of classification model."
"label": "Name of classification model",
"description": "Identifier for the custom classification model to use."
},
"threshold": {
"label": "Classification score threshold to change the state."
"label": "Classification score threshold to change the state",
"description": "Score threshold used to change the classification state."
},
"save_attempts": {
"label": "Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification."
"label": "Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification",
"description": "How many classification attempts to save for recent classifications UI."
},
"object_config": {
"objects": {
"label": "Object types to classify."
"label": "Object types to classify",
"description": "List of object types to run object classification on."
},
"classification_type": {
"label": "Type of classification that is applied."
"label": "Type of classification that is applied",
"description": "Classification type applied: 'sub_label' (adds sub_label) or other supported types."
}
},
"state_config": {
"cameras": {
"label": "Cameras to run classification on.",
"label": "Cameras to run classification on",
"description": "Per-camera crop and settings for running state classification.",
"crop": {
"label": "Crop of image frame on this camera to run classification on."
"label": "Crop of image frame on this camera to run classification on",
"description": "Crop coordinates to use for running classification on this camera."
}
},
"motion": {
"label": "If classification should be run when motion is detected in the crop."
"label": "If classification should be run when motion is detected in the crop",
"description": "If true, run classification when motion is detected within the specified crop."
},
"interval": {
"label": "Interval to run classification on in seconds."
"label": "Interval to run classification on in seconds",
"description": "Interval (seconds) between periodic classification runs for state classification."
}
}
}
}
}

View File

@ -1,6 +1,8 @@
{
"label": "Database configuration.",
"label": "Database configuration",
"description": "Settings for the SQLite database used by Frigate to store events and metadata.",
"path": {
"label": "Database path."
"label": "Database path",
"description": "Filesystem path where the Frigate SQLite database file will be stored."
}
}
}

View File

@ -1,45 +1,60 @@
{
"label": "Global object tracking configuration.",
"label": "Object tracking",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": {
"label": "Detection Enabled."
"label": "Detection Enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
},
"height": {
"label": "Height of the stream for the detect role."
"label": "Height of the stream for the detect role",
"description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"width": {
"label": "Width of the stream for the detect role."
"label": "Width of the stream for the detect role",
"description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"fps": {
"label": "Number of frames per second to process through detection."
"label": "Number of frames per second to process through detection",
"description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended ~5)."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker."
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends."
"label": "Maximum number of frames the object can disappear before detection ends",
"description": "Number of frames without a detection before a tracked object is considered gone."
},
"stationary": {
"label": "Stationary objects config.",
"label": "Stationary objects config",
"description": "Settings to detect and manage objects that remain stationary for a period of time.",
"interval": {
"label": "Frame interval for checking stationary objects."
"label": "Frame interval for checking stationary objects",
"description": "How often (in frames) to run a detection check to confirm a stationary object."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary"
"label": "Number of frames without a position change for an object to be considered stationary",
"description": "Number of frames with no position change required to mark an object as stationary."
},
"max_frames": {
"label": "Max frames for stationary objects.",
"label": "Max frames for stationary objects",
"description": "Limits how long stationary objects are tracked before being discarded (override defaults to control retention).",
"default": {
"label": "Default max frames."
"label": "Default max frames",
"description": "Default maximum frames to track a stationary object before stopping."
},
"objects": {
"label": "Object specific max frames."
"label": "Object specific max frames",
"description": "Per-object overrides for maximum frames to track stationary objects."
}
},
"classifier": {
"label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary."
"label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary",
"description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter."
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by."
"label": "Milliseconds to offset detect annotations by",
"description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative."
}
}
}

View File

@ -1,12 +1,16 @@
{
"label": "Detector hardware configuration.",
"label": "Detector hardware configuration",
"description": "Configuration for object detectors (CPU, EdgeTPU, GPU backends) and any detector-specific model settings.",
"type": {
"label": "Detector Type"
"label": "Detector Type",
"description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')."
},
"model": {
"label": "Detector specific model configuration."
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.)."
},
"model_path": {
"label": "Detector specific model path."
"label": "Detector specific model path",
"description": "File path to the detector model binary if required by the chosen detector."
}
}
}

View File

@ -1,3 +1,4 @@
{
"label": "Frigate environment variables."
}
"label": "Frigate environment variables",
"description": "Key/value pairs of environment variables to set for the Frigate process."
}

View File

@ -1,34 +1,44 @@
{
"label": "Face recognition config.",
"label": "Face recognition config",
"description": "Global settings for face detection and recognition used across cameras unless overridden per-camera.",
"enabled": {
"label": "Enable face recognition."
"label": "Enable face recognition",
"description": "Enable or disable face recognition globally."
},
"model_size": {
"label": "The size of the embeddings model used."
"label": "The size of the embeddings model used",
"description": "Model size to use for face embeddings (small/large); larger may require GPU."
},
"unknown_score": {
"label": "Minimum face distance score required to be marked as a potential match."
"label": "Minimum face distance score required to be marked as a potential match",
"description": "Distance threshold below which a face is considered a potential match (lower = stricter)."
},
"detection_threshold": {
"label": "Minimum face detection score required to be considered a face."
"label": "Minimum face detection score required to be considered a face",
"description": "Minimum detection confidence required to consider a face detection valid."
},
"recognition_threshold": {
"label": "Minimum face distance score required to be considered a match."
"label": "Minimum face distance score required to be considered a match",
"description": "Face embedding distance threshold to consider two faces a match."
},
"min_area": {
"label": "Min area of face box to consider running face recognition."
"label": "Min area of face box to consider running face recognition",
"description": "Minimum area (pixels) of a detected face box required to attempt recognition."
},
"min_faces": {
"label": "Min face recognitions for the sub label to be applied to the person object."
"label": "Min face recognitions for the sub label to be applied to the person object",
"description": "Minimum number of face recognitions required before applying a recognized sub-label to a person."
},
"save_attempts": {
"label": "Number of face attempts to save in the recent recognitions tab."
"label": "Number of face attempts to save in the recent recognitions tab",
"description": "Number of face recognition attempts to retain for recent recognition UI."
},
"blur_confidence_filter": {
"label": "Apply blur quality filter to face confidence."
"label": "Apply blur quality filter to face confidence",
"description": "Adjust confidence scores based on image blur to reduce false positives for poor quality faces."
},
"device": {
"label": "The device key to use for face recognition.",
"label": "The device key to use for face recognition",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}
}
}

View File

@ -1,33 +1,44 @@
{
"label": "Global FFmpeg configuration.",
"label": "Global FFmpeg configuration",
"description": "Global FFmpeg settings including binary path, global args, hwaccel options, and per-role output args.",
"path": {
"label": "FFmpeg path"
"label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use globally or a version alias."
},
"global_args": {
"label": "Global FFmpeg arguments."
"label": "Global FFmpeg arguments",
"description": "Global args passed to FFmpeg processes by default."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments."
"label": "FFmpeg hardware acceleration arguments",
"description": "Global hardware acceleration arguments for FFmpeg (auto or provider-specific)."
},
"input_args": {
"label": "FFmpeg input arguments."
"label": "FFmpeg input arguments",
"description": "Global input arguments applied to FFmpeg input streams by default."
},
"output_args": {
"label": "FFmpeg output arguments per role.",
"label": "FFmpeg output arguments per role",
"description": "Default output args used for different FFmpeg roles such as detect and record.",
"detect": {
"label": "Detect role FFmpeg output arguments."
"label": "Detect role FFmpeg output arguments",
"description": "Default output args for detect role streams."
},
"record": {
"label": "Record role FFmpeg output arguments."
"label": "Record role FFmpeg output arguments",
"description": "Default output args for record role streams."
}
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera."
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players."
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index to use for hardware acceleration."
"label": "GPU index to use for hardware acceleration",
"description": "Default GPU index used for hardware acceleration if available."
}
}
}

View File

@ -1,3 +1,4 @@
{
"label": "Global restream configuration."
}
"label": "Global restream configuration",
"description": "Settings for the integrated go2rtc restreaming service used for live stream relaying and translation."
}

View File

@ -1,12 +1,16 @@
{
"label": "Live playback settings.",
"label": "Live playback settings",
"description": "Settings used by the Web UI to control live stream selection, resolution and quality.",
"streams": {
"label": "Friendly names and restream names to use for live view."
"label": "Friendly names and restream names to use for live view",
"description": "Mapping of configured stream names to restream/go2rtc names used for live playback."
},
"height": {
"label": "Live camera view height"
"label": "Live camera view height",
"description": "Height (pixels) to render the live stream in the Web UI; must be <= detect stream height."
},
"quality": {
"label": "Live camera view quality"
"label": "Live camera view quality",
"description": "Encoding quality for the live jsmpeg stream (1 highest, 31 lowest)."
}
}
}

View File

@ -1,9 +1,12 @@
{
"label": "Logging configuration.",
"label": "Logging configuration",
"description": "Controls default log verbosity and per-component log level overrides.",
"default": {
"label": "Default logging level."
"label": "Default logging level",
"description": "Default global log verbosity (debug, info, warning, error)."
},
"logs": {
"label": "Log level for specified processes."
"label": "Log level for specified processes",
"description": "Per-component log level overrides to increase or decrease verbosity for specific modules."
}
}
}

View File

@ -1,43 +1,56 @@
{
"label": "License Plate recognition config.",
"label": "License Plate recognition config",
"description": "Global license plate recognition settings including detection thresholds, formatting, and known plates.",
"enabled": {
"label": "Enable license plate recognition."
"label": "Enable license plate recognition",
"description": "Enable or disable LPR globally; camera-level settings can override."
},
"model_size": {
"label": "The size of the embeddings model used."
"label": "The size of the embeddings model used",
"description": "Model size used for text detection/recognition; small runs on CPU, large on GPU."
},
"detection_threshold": {
"label": "License plate object confidence score required to begin running recognition."
"label": "License plate object confidence score required to begin running recognition",
"description": "Detection confidence threshold to begin running OCR on a suspected plate."
},
"min_area": {
"label": "Minimum area of license plate to begin running recognition."
"label": "Minimum area of license plate to begin running recognition",
"description": "Minimum plate area (pixels) required to attempt recognition."
},
"recognition_threshold": {
"label": "Recognition confidence score required to add the plate to the object as a sub label."
"label": "Recognition confidence score required to add the plate to the object as a sub label",
"description": "Confidence threshold required for recognized plate text to be attached as a sub-label."
},
"min_plate_length": {
"label": "Minimum number of characters a license plate must have to be added to the object as a sub label."
"label": "Minimum number of characters a license plate must have to be added to the object as a sub label",
"description": "Minimum number of characters a recognized plate must contain to be considered valid."
},
"format": {
"label": "Regular expression for the expected format of license plate."
"label": "Regular expression for the expected format of license plate",
"description": "Optional regex to validate recognized plate strings against an expected format."
},
"match_distance": {
"label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate."
"label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate",
"description": "Number of character mismatches allowed when comparing detected plates to known plates."
},
"known_plates": {
"label": "Known plates to track (strings or regular expressions)."
"label": "Known plates to track (strings or regular expressions)",
"description": "List of plates or regexes to specially track or alert on."
},
"enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition."
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition",
"description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results."
},
"debug_save_plates": {
"label": "Save plates captured for LPR for debugging purposes."
"label": "Save plates captured for LPR for debugging purposes",
"description": "Save plate crop images for debugging LPR performance."
},
"device": {
"label": "The device key to use for LPR.",
"label": "The device key to use for LPR",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
},
"replace_rules": {
"label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'."
"label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'",
"description": "Regex replacement rules used to normalize detected plate strings before matching."
}
}
}

View File

@ -1,33 +1,44 @@
{
"label": "Detection model configuration.",
"label": "Detection model configuration",
"description": "Settings to configure a custom object detection model, its input shape, and labelmap overrides.",
"path": {
"label": "Custom Object detection model path."
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector."
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width."
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height."
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization."
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels."
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape"
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format"
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type"
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type"
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
}
}

View File

@ -1,3 +1,4 @@
{
"label": "Global motion detection configuration."
}
"label": "Global motion detection configuration",
"description": "Default motion detection settings applied to cameras unless overridden per-camera."
}

View File

@ -1,42 +1,56 @@
{
"label": "MQTT configuration.",
"label": "MQTT configuration",
"description": "Settings for connecting and publishing telemetry, snapshots, and events to an MQTT broker.",
"enabled": {
"label": "Enable MQTT Communication."
"label": "Enable MQTT Communication",
"description": "Enable or disable MQTT integration for state, events, and snapshots."
},
"host": {
"label": "MQTT Host"
"label": "MQTT Host",
"description": "Hostname or IP address of the MQTT broker."
},
"port": {
"label": "MQTT Port"
"label": "MQTT Port",
"description": "Port of the MQTT broker (usually 1883 for plain MQTT)."
},
"topic_prefix": {
"label": "MQTT Topic Prefix"
"label": "MQTT Topic Prefix",
"description": "MQTT topic prefix for all Frigate topics; must be unique if running multiple instances."
},
"client_id": {
"label": "MQTT Client ID"
"label": "MQTT Client ID",
"description": "Client identifier used when connecting to the MQTT broker; should be unique per instance."
},
"stats_interval": {
"label": "MQTT Camera Stats Interval"
"label": "MQTT Camera Stats Interval",
"description": "Interval in seconds for publishing system and camera stats to MQTT."
},
"user": {
"label": "MQTT Username"
"label": "MQTT Username",
"description": "Optional MQTT username; can be provided via environment variables or secrets."
},
"password": {
"label": "MQTT Password"
"label": "MQTT Password",
"description": "Optional MQTT password; can be provided via environment variables or secrets."
},
"tls_ca_certs": {
"label": "MQTT TLS CA Certificates"
"label": "MQTT TLS CA Certificates",
"description": "Path to CA certificate for TLS connections to the broker (for self-signed certs)."
},
"tls_client_cert": {
"label": "MQTT TLS Client Certificate"
"label": "MQTT TLS Client Certificate",
"description": "Client certificate path for TLS mutual authentication; do not set user/password when using client certs."
},
"tls_client_key": {
"label": "MQTT TLS Client Key"
"label": "MQTT TLS Client Key",
"description": "Private key path for the client certificate."
},
"tls_insecure": {
"label": "MQTT TLS Insecure"
"label": "MQTT TLS Insecure",
"description": "Allow insecure TLS connections by skipping hostname verification (not recommended)."
},
"qos": {
"label": "MQTT QoS"
"label": "MQTT QoS",
"description": "Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2)."
}
}
}

View File

@ -1,9 +1,12 @@
{
"label": "Networking configuration",
"description": "Network-related settings such as IPv6 enablement for Frigate endpoints.",
"ipv6": {
"label": "IPv6 configuration",
"description": "IPv6-specific settings for Frigate network services.",
"enabled": {
"label": "Enable IPv6 for port 5000 and/or 8971"
"label": "Enable IPv6 for port 5000 and/or 8971",
"description": "Enable IPv6 support for Frigate services (API and UI) where applicable."
},
"listen": {
"label": "Listening ports configuration",

View File

@ -1,15 +1,20 @@
{
"label": "Global notification configuration.",
"label": "Global notification configuration",
"description": "Global settings to enable and control notifications; can be overridden per-camera.",
"enabled": {
"label": "Enable notifications"
"label": "Enable notifications",
"description": "Enable or disable notifications globally."
},
"email": {
"label": "Email required for push."
"label": "Email required for push",
"description": "Email address used for push notifications or required by certain notification providers."
},
"cooldown": {
"label": "Cooldown period for notifications (time in seconds)."
"label": "Cooldown period for notifications (time in seconds)",
"description": "Cooldown (seconds) between notifications to avoid spamming recipients."
},
"enabled_in_config": {
"label": "Keep track of original state of notifications."
"label": "Keep track of original state of notifications",
"description": "Indicates whether notifications were enabled in the original static configuration."
}
}
}

View File

@ -1,69 +1,92 @@
{
"label": "Global object configuration.",
"label": "Global object configuration",
"description": "Global object tracking defaults including which labels to track and per-object filters.",
"track": {
"label": "Objects to track."
"label": "Objects to track",
"description": "List of object labels to track globally; camera configs can override this."
},
"filters": {
"label": "Object filters.",
"label": "Object filters",
"description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Minimum bounding box area (pixels or percentage) required for this object type."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted."
"label": "Minimum ratio of bounding box's width/height for object to be counted",
"description": "Minimum width/height ratio required for the bounding box to qualify."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted."
"label": "Maximum ratio of bounding box's width/height for object to be counted",
"description": "Maximum width/height ratio allowed for the bounding box to qualify."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted."
"label": "Average detection confidence threshold for object to be counted",
"description": "Average detection confidence threshold required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted."
"label": "Minimum detection confidence for object to be counted",
"description": "Minimum single-frame detection confidence required for the object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration."
"label": "Detection area polygon mask for this filter configuration",
"description": "Polygon coordinates defining where this filter applies within the frame."
}
},
"mask": {
"label": "Object mask."
"label": "Object mask",
"description": "Global mask polygon used to prevent object detection in specified areas."
},
"genai": {
"label": "Config for using genai to analyze objects.",
"label": "Config for using genai to analyze objects",
"description": "Global GenAI options for describing tracked objects and sending frames for generation.",
"enabled": {
"label": "Enable GenAI for camera."
"label": "Enable GenAI for camera",
"description": "Enable GenAI generation of descriptions for tracked objects by default."
},
"use_snapshot": {
"label": "Use snapshots for generating descriptions."
"label": "Use snapshots for generating descriptions",
"description": "Use object snapshots instead of thumbnails for GenAI description generation."
},
"prompt": {
"label": "Default caption prompt."
"label": "Default caption prompt",
"description": "Default prompt template used when generating descriptions with GenAI."
},
"object_prompts": {
"label": "Object specific prompts."
"label": "Object specific prompts",
"description": "Per-object prompts to customize GenAI outputs for specific labels."
},
"objects": {
"label": "List of objects to run generative AI for."
"label": "List of objects to run generative AI for",
"description": "List of object labels to send to GenAI by default."
},
"required_zones": {
"label": "List of required zones to be entered in order to run generative AI."
"label": "List of required zones to be entered in order to run generative AI",
"description": "Zones that must be entered for objects to qualify for GenAI description generation."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
"label": "Save thumbnails sent to generative AI for debugging purposes",
"description": "Save thumbnails sent to GenAI for debugging and review."
},
"send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object.",
"label": "What triggers to use to send frames to generative AI for a tracked object",
"description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).",
"tracked_object_end": {
"label": "Send once the object is no longer tracked."
"label": "Send once the object is no longer tracked",
"description": "Send a request to GenAI when the tracked object ends."
},
"after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated."
"label": "Send an early request to generative AI when X frames accumulated",
"description": "Send a request to GenAI after a specified number of significant updates for the tracked object."
}
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
"label": "Keep track of original state of generative AI",
"description": "Indicates whether GenAI was enabled in the original static config."
}
}
}
}

View File

@ -1,27 +1,36 @@
{
"label": "Proxy configuration.",
"label": "Proxy configuration",
"description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
"header_map": {
"label": "Header mapping definitions for proxy user passing.",
"label": "Header mapping definitions for proxy user passing",
"description": "Map incoming proxy headers to Frigate user and role fields for proxy-based auth.",
"user": {
"label": "Header name from upstream proxy to identify user."
"label": "Header name from upstream proxy to identify user",
"description": "Header containing the authenticated username provided by the upstream proxy."
},
"role": {
"label": "Header name from upstream proxy to identify user role."
"label": "Header name from upstream proxy to identify user role",
"description": "Header containing the authenticated user's role or groups from the upstream proxy."
},
"role_map": {
"label": "Mapping of Frigate roles to upstream group values. "
"label": "Mapping of Frigate roles to upstream group values. ",
"description": "Map upstream group values to Frigate roles (for example map admin groups to the admin role)."
}
},
"logout_url": {
"label": "Redirect url for logging out with proxy."
"label": "Redirect url for logging out with proxy",
"description": "URL to redirect users to when logging out via the proxy."
},
"auth_secret": {
"label": "Secret value for proxy authentication."
"label": "Secret value for proxy authentication",
"description": "Optional secret checked against the X-Proxy-Secret header to verify trusted proxies."
},
"default_role": {
"label": "Default role for proxy users."
"label": "Default role for proxy users",
"description": "Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer)."
},
"separator": {
"label": "The character used to separate values in a mapped header."
"label": "The character used to separate values in a mapped header",
"description": "Character used to split multiple values provided in proxy headers (for example a comma)."
}
}
}

View File

@ -1,72 +1,82 @@
{
"label": "Global record configuration.",
"label": "Global record configuration",
"description": "Global recording and retention settings applied to cameras unless overridden per-camera.",
"enabled": {
"label": "Enable record on all cameras."
"label": "Enable record on all cameras",
"description": "Enable or disable recording globally; individual cameras can override this."
},
"expire_interval": {
"label": "Number of minutes to wait between cleanup runs."
"label": "Number of minutes to wait between cleanup runs",
"description": "Minutes between cleanup passes that remove expired recording segments."
},
"continuous": {
"label": "Continuous recording retention settings.",
"label": "Continuous recording retention settings",
"days": {
"label": "Default retention period."
"label": "Default retention period",
"description": "Days to retain continuous (always-on) recordings."
}
},
"motion": {
"label": "Motion recording retention settings.",
"label": "Motion recording retention settings",
"days": {
"label": "Default retention period."
"label": "Default retention period",
"description": "Days to retain recordings triggered by motion."
}
},
"detections": {
"label": "Detection specific retention settings.",
"label": "Detection specific retention settings",
"pre_capture": {
"label": "Seconds to retain before event starts."
"label": "Seconds to retain before event starts"
},
"post_capture": {
"label": "Seconds to retain after event ends."
"label": "Seconds to retain after event ends"
},
"retain": {
"label": "Event retention settings.",
"label": "Event retention settings",
"days": {
"label": "Default retention period."
"label": "Default retention period"
},
"mode": {
"label": "Retain mode."
"label": "Retain mode"
}
}
},
"alerts": {
"label": "Alert specific retention settings.",
"label": "Alert specific retention settings",
"pre_capture": {
"label": "Seconds to retain before event starts."
"label": "Seconds to retain before event starts"
},
"post_capture": {
"label": "Seconds to retain after event ends."
"label": "Seconds to retain after event ends"
},
"retain": {
"label": "Event retention settings.",
"label": "Event retention settings",
"days": {
"label": "Default retention period."
"label": "Default retention period"
},
"mode": {
"label": "Retain mode."
"label": "Retain mode"
}
}
},
"export": {
"label": "Recording Export Config",
"description": "Settings used when exporting recordings such as timelapse and hardware acceleration.",
"hwaccel_args": {
"label": "Export-specific FFmpeg hardware acceleration arguments."
"label": "Export-specific FFmpeg hardware acceleration arguments",
"description": "Hardware acceleration args to use for export/transcode operations."
}
},
"preview": {
"label": "Recording Preview Config",
"description": "Settings controlling the quality of recording previews shown in the UI.",
"quality": {
"label": "Quality of recording preview."
"label": "Quality of recording preview",
"description": "Preview quality level (very_low, low, medium, high, very_high)."
}
},
"enabled_in_config": {
"label": "Keep track of original state of recording."
"label": "Keep track of original state of recording",
"description": "Indicates whether recording was enabled in the original static configuration."
}
}
}

View File

@ -1,69 +1,92 @@
{
"label": "Review configuration.",
"label": "Review configuration",
"description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
"alerts": {
"label": "Review alerts config.",
"label": "Review alerts config",
"description": "Settings for which tracked objects generate alerts and how alerts are retained.",
"enabled": {
"label": "Enable alerts."
"label": "Enable alerts",
"description": "Enable or disable alert generation for this camera."
},
"labels": {
"label": "Labels to create alerts for."
"label": "Labels to create alerts for",
"description": "List of object labels that qualify as alerts (for example: car, person)."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert."
"label": "List of required zones to be entered in order to save the event as an alert",
"description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone."
},
"enabled_in_config": {
"label": "Keep track of original state of alerts."
"label": "Keep track of original state of alerts",
"description": "Tracks whether alerts were originally enabled in the static configuration."
},
"cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred."
"label": "Time to cutoff alerts after no alert-causing activity has occurred",
"description": "Seconds to wait after no alert-causing activity before cutting off an alert."
}
},
"detections": {
"label": "Review detections config.",
"label": "Review detections config",
"description": "Settings for creating detection events (non-alert) and how long to keep them.",
"enabled": {
"label": "Enable detections."
"label": "Enable detections",
"description": "Enable or disable detection events for this camera."
},
"labels": {
"label": "Labels to create detections for."
"label": "Labels to create detections for",
"description": "List of object labels that qualify as detection events."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection."
"label": "List of required zones to be entered in order to save the event as a detection",
"description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone."
},
"cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred."
"label": "Time to cutoff detection after no detection-causing activity has occurred",
"description": "Seconds to wait after no detection-causing activity before cutting off a detection."
},
"enabled_in_config": {
"label": "Keep track of original state of detections."
"label": "Keep track of original state of detections",
"description": "Tracks whether detections were originally enabled in the static configuration."
}
},
"genai": {
"label": "Review description genai config.",
"label": "Review description genai config",
"description": "Controls use of generative AI for producing descriptions and summaries of review items.",
"enabled": {
"label": "Enable GenAI descriptions for review items."
"label": "Enable GenAI descriptions for review items",
"description": "Enable or disable GenAI-generated descriptions and summaries for review items."
},
"alerts": {
"label": "Enable GenAI for alerts."
"label": "Enable GenAI for alerts",
"description": "Use GenAI to generate descriptions for alert items."
},
"detections": {
"label": "Enable GenAI for detections."
"label": "Enable GenAI for detections",
"description": "Use GenAI to generate descriptions for detection items."
},
"image_source": {
"label": "Image source for review descriptions."
"label": "Image source for review descriptions",
"description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens."
},
"additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera."
"label": "Additional concerns that GenAI should make note of on this camera",
"description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes."
"label": "Save thumbnails sent to generative AI for debugging purposes",
"description": "Save thumbnails that are sent to the GenAI provider for debugging and review."
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI."
"label": "Keep track of original state of generative AI",
"description": "Tracks whether GenAI review was originally enabled in the static configuration."
},
"preferred_language": {
"label": "Preferred language for GenAI Response"
"label": "Preferred language for GenAI Response",
"description": "Preferred language to request from the GenAI provider for generated responses."
},
"activity_context_prompt": {
"label": "Custom activity context prompt defining normal and suspicious activity patterns for this property."
"label": "Custom activity context prompt defining normal and suspicious activity patterns for this property",
"description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries."
}
}
}
}

View File

@ -1,3 +1,4 @@
{
"label": "If Frigate should be started in safe mode."
}
"label": "If Frigate should be started in safe mode",
"description": "When enabled, start Frigate in safe mode with reduced features for troubleshooting."
}

View File

@ -1,19 +1,24 @@
{
"label": "Semantic search configuration.",
"label": "Semantic search configuration",
"description": "Settings for semantic search which builds and queries object embeddings to find similar items.",
"enabled": {
"label": "Enable semantic search."
"label": "Enable semantic search",
"description": "Enable or disable the semantic search feature."
},
"reindex": {
"label": "Reindex all tracked objects on startup."
"label": "Reindex all tracked objects on startup",
"description": "Trigger a full reindex of historical tracked objects into the embeddings database."
},
"model": {
"label": "The CLIP model to use for semantic search."
"label": "The CLIP model to use for semantic search",
"description": "The embeddings model to use for semantic search (for example 'jinav1')."
},
"model_size": {
"label": "The size of the embeddings model used."
"label": "The size of the embeddings model used",
"description": "Select model size; 'small' runs on CPU and 'large' typically requires GPU."
},
"device": {
"label": "The device key to use for semantic search.",
"label": "The device key to use for semantic search",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}
}
}

View File

@ -1,39 +1,48 @@
{
"label": "Global snapshots configuration.",
"label": "Global snapshots configuration",
"description": "Global settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
"enabled": {
"label": "Snapshots enabled."
"label": "Snapshots enabled",
"description": "Enable or disable saving snapshots globally."
},
"clean_copy": {
"label": "Create a clean copy of the snapshot image."
"label": "Create a clean copy of the snapshot image",
"description": "Save an unannotated clean copy of snapshots in addition to annotated ones."
},
"timestamp": {
"label": "Add a timestamp overlay on the snapshot."
"label": "Add a timestamp overlay on the snapshot",
"description": "Overlay a timestamp on saved snapshots."
},
"bounding_box": {
"label": "Add a bounding box overlay on the snapshot."
"label": "Add a bounding box overlay on the snapshot",
"description": "Draw bounding boxes for tracked objects on saved snapshots."
},
"crop": {
"label": "Crop the snapshot to the detected object."
"label": "Crop the snapshot to the detected object",
"description": "Crop saved snapshots to the detected object's bounding box."
},
"required_zones": {
"label": "List of required zones to be entered in order to save a snapshot."
"label": "List of required zones to be entered in order to save a snapshot",
"description": "Zones an object must enter for a snapshot to be saved."
},
"height": {
"label": "Snapshot image height."
"label": "Snapshot image height",
"description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size."
},
"retain": {
"label": "Snapshot retention.",
"label": "Snapshot retention",
"default": {
"label": "Default retention period."
"label": "Default retention period"
},
"mode": {
"label": "Retain mode."
"label": "Retain mode"
},
"objects": {
"label": "Object retention period."
"label": "Object retention period"
}
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)."
"label": "Quality of the encoded jpeg (0-100)",
"description": "JPEG encode quality for saved snapshots (0-100)."
}
}
}

View File

@ -1,24 +1,32 @@
{
"label": "Telemetry configuration.",
"label": "Telemetry configuration",
"description": "System telemetry and stats options including GPU and network bandwidth monitoring.",
"network_interfaces": {
"label": "Enabled network interfaces for bandwidth calculation."
"label": "Enabled network interfaces for bandwidth calculation",
"description": "List of network interface name prefixes to monitor for bandwidth statistics."
},
"stats": {
"label": "System Stats Configuration",
"description": "Options to enable/disable collection of various system and GPU statistics.",
"amd_gpu_stats": {
"label": "Enable AMD GPU stats."
"label": "Enable AMD GPU stats",
"description": "Enable collection of AMD GPU statistics if an AMD GPU is present."
},
"intel_gpu_stats": {
"label": "Enable Intel GPU stats."
"label": "Enable Intel GPU stats",
"description": "Enable collection of Intel GPU statistics if an Intel GPU is present."
},
"network_bandwidth": {
"label": "Enable network bandwidth for ffmpeg processes."
"label": "Enable network bandwidth for ffmpeg processes",
"description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)."
},
"intel_gpu_device": {
"label": "Define the device to use when gathering SR-IOV stats."
"label": "Define the device to use when gathering SR-IOV stats",
"description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats."
}
},
"version_check": {
"label": "Enable latest version check."
"label": "Enable latest version check",
"description": "Enable an outbound check to detect if a newer Frigate version is available."
}
}
}

View File

@ -1,27 +1,35 @@
{
"label": "Global timestamp style configuration.",
"label": "Global timestamp style configuration",
"description": "Global styling options for in-feed timestamps applied to recordings and snapshots.",
"position": {
"label": "Timestamp position."
"label": "Timestamp position",
"description": "Position of the timestamp on the image (tl/tr/bl/br)."
},
"format": {
"label": "Timestamp format."
"label": "Timestamp format",
"description": "Datetime format string used for timestamps (Python datetime format codes)."
},
"color": {
"label": "Timestamp color.",
"label": "Timestamp color",
"red": {
"label": "Red"
"label": "Red",
"description": "Red component (0-255) for timestamp color."
},
"green": {
"label": "Green"
"label": "Green",
"description": "Green component (0-255) for timestamp color."
},
"blue": {
"label": "Blue"
"label": "Blue",
"description": "Blue component (0-255) for timestamp color."
}
},
"thickness": {
"label": "Timestamp thickness."
"label": "Timestamp thickness",
"description": "Line thickness of the timestamp text."
},
"effect": {
"label": "Timestamp effect."
"label": "Timestamp effect",
"description": "Visual effect for the timestamp text (none, solid, shadow)."
}
}
}

View File

@ -1,6 +1,8 @@
{
"label": "TLS configuration.",
"label": "TLS configuration",
"description": "TLS settings for Frigate's web endpoints (port 8971).",
"enabled": {
"label": "Enable TLS for port 8971"
"label": "Enable TLS for port 8971",
"description": "Enable TLS for Frigate's web UI and API on the configured TLS port."
}
}
}

View File

@ -1,18 +1,24 @@
{
"label": "UI configuration.",
"label": "UI configuration",
"description": "User interface preferences such as timezone, time/date formatting, and units.",
"timezone": {
"label": "Override UI timezone."
"label": "Override UI timezone",
"description": "Optional timezone to display across the UI (defaults to browser local time if unset)."
},
"time_format": {
"label": "Override UI time format."
"label": "Override UI time format",
"description": "Time format to use in the UI (browser, 12hour, or 24hour)."
},
"date_style": {
"label": "Override UI dateStyle."
"label": "Override UI dateStyle",
"description": "Date style to use in the UI (full, long, medium, short)."
},
"time_style": {
"label": "Override UI timeStyle."
"label": "Override UI timeStyle",
"description": "Time style to use in the UI (full, long, medium, short)."
},
"unit_system": {
"label": "The unit system to use for measurements."
"label": "The unit system to use for measurements",
"description": "Unit system for display (metric or imperial) used in the UI and MQTT."
}
}
}

View File

@ -1,3 +1,4 @@
{
"label": "Current config version."
}
"label": "Current config version",
"description": "Numeric or string version of the active configuration to help detect migrations or format changes."
}