new titles as i18n

This commit is contained in:
Josh Hawkins 2026-01-29 09:43:43 -06:00
parent 13b9d1b6e3
commit 30db891837
34 changed files with 1288 additions and 306 deletions

View File

@ -1,20 +1,20 @@
{ {
"label": "Audio events", "label": "Audio events",
"description": "settings for audio-based event detection; camera-level settings can override these.", "description": "Settings for audio-based event detection; can be overridden per-camera.",
"enabled": { "enabled": {
"label": "Enable audio events", "label": "Enable audio",
"description": "Enable or disable audio event detection globally. Can be overridden per camera." "description": "Enable or disable audio event detection; can be overridden per-camera."
}, },
"max_not_heard": { "max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event", "label": "End timeout",
"description": "Amount of seconds without the configured audio type before the audio event is ended." "description": "Amount of seconds without the configured audio type before the audio event is ended."
}, },
"min_volume": { "min_volume": {
"label": "Min volume required to run audio detection", "label": "Minimum volume",
"description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)." "description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)."
}, },
"listen": { "listen": {
"label": "Audio to listen for", "label": "Listen types",
"description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)." "description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)."
}, },
"filters": { "filters": {
@ -22,11 +22,11 @@
"description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives." "description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of audio detection", "label": "Original audio state",
"description": "Indicates whether audio detection was originally enabled in the static config file." "description": "Indicates whether audio detection was originally enabled in the static config file."
}, },
"num_threads": { "num_threads": {
"label": "Number of detection threads", "label": "Detection threads",
"description": "Number of threads to use for audio detection processing." "description": "Number of threads to use for audio detection processing."
} }
} }

View File

@ -6,19 +6,19 @@
"description": "Enable or disable automatic audio transcription globally." "description": "Enable or disable automatic audio transcription globally."
}, },
"language": { "language": {
"label": "Language abbreviation to use for audio event transcription/translation", "label": "Transcription language",
"description": "Language code used for transcription/translation (for example 'en' for English)." "description": "Language code used for transcription/translation (for example 'en' for English)."
}, },
"device": { "device": {
"label": "The device used for audio transcription", "label": "Transcription device",
"description": "Device key (CPU/GPU) to run the transcription model on." "description": "Device key (CPU/GPU) to run the transcription model on."
}, },
"model_size": { "model_size": {
"label": "The size of the embeddings model used", "label": "Model size",
"description": "Model size to use for transcription; smaller models run on CPU, larger models may need GPU." "description": "Model size to use for transcription; the small model runs on CPU, large model requires a GPU."
}, },
"live_enabled": { "live_enabled": {
"label": "Enable live transcriptions", "label": "Live transcription",
"description": "Enable streaming live transcription for audio as it is received." "description": "Enable streaming live transcription for audio as it is received."
} }
} }

View File

@ -6,43 +6,43 @@
"description": "Enable native authentication for the Frigate UI." "description": "Enable native authentication for the Frigate UI."
}, },
"reset_admin_password": { "reset_admin_password": {
"label": "Reset the admin password on startup", "label": "Reset admin password",
"description": "If true, reset the admin user's password on startup and print the new password in logs." "description": "If true, reset the admin user's password on startup and print the new password in logs."
}, },
"cookie_name": { "cookie_name": {
"label": "Name for jwt token cookie", "label": "JWT cookie name",
"description": "Name of the cookie used to store the JWT token for native authentication." "description": "Name of the cookie used to store the JWT token for native authentication."
}, },
"cookie_secure": { "cookie_secure": {
"label": "Set secure flag on cookie", "label": "Secure cookie flag",
"description": "Set the secure flag on the auth cookie; should be true when using TLS." "description": "Set the secure flag on the auth cookie; should be true when using TLS."
}, },
"session_length": { "session_length": {
"label": "Session length for jwt session tokens", "label": "Session length",
"description": "Session duration in seconds for JWT-based sessions." "description": "Session duration in seconds for JWT-based sessions."
}, },
"refresh_time": { "refresh_time": {
"label": "Refresh the session if it is going to expire in this many seconds", "label": "Session refresh window",
"description": "When a session is within this many seconds of expiring, refresh it back to full length." "description": "When a session is within this many seconds of expiring, refresh it back to full length."
}, },
"failed_login_rate_limit": { "failed_login_rate_limit": {
"label": "Rate limits for failed login attempts", "label": "Failed login limits",
"description": "Rate limiting rules for failed login attempts to reduce brute-force attacks." "description": "Rate limiting rules for failed login attempts to reduce brute-force attacks."
}, },
"trusted_proxies": { "trusted_proxies": {
"label": "Trusted proxies for determining IP address to rate limit", "label": "Trusted proxies",
"description": "List of trusted proxy IPs used when determining client IP for rate limiting." "description": "List of trusted proxy IPs used when determining client IP for rate limiting."
}, },
"hash_iterations": { "hash_iterations": {
"label": "Password hash iterations", "label": "Hash iterations",
"description": "Number of PBKDF2-SHA256 iterations to use when hashing user passwords." "description": "Number of PBKDF2-SHA256 iterations to use when hashing user passwords."
}, },
"roles": { "roles": {
"label": "Role to camera mappings. Empty list grants access to all cameras", "label": "Role mappings",
"description": "Map roles to camera lists. An empty list grants access to all cameras for the role." "description": "Map roles to camera lists. An empty list grants access to all cameras for the role."
}, },
"admin_first_time_login": { "admin_first_time_login": {
"label": "Internal field to expose first-time admin login flag to the UI", "label": "First-time admin flag",
"description": "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. " "description": "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. "
} }
} }

View File

@ -2,27 +2,23 @@
"label": "Birdseye", "label": "Birdseye",
"description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.", "description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
"enabled": { "enabled": {
"label": "Enable birdseye view", "label": "Enable Birdseye",
"description": "Enable or disable the Birdseye view feature." "description": "Enable or disable the Birdseye view feature."
}, },
"mode": { "mode": {
"label": "Tracking mode", "label": "Tracking mode",
"description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'." "description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'."
}, },
"order": {
"label": "Position of the camera in the birdseye view",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
},
"restream": { "restream": {
"label": "Restream birdseye via RTSP", "label": "Restream RTSP",
"description": "Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously." "description": "Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously."
}, },
"width": { "width": {
"label": "Birdseye width", "label": "Width",
"description": "Output width (pixels) of the composed Birdseye frame." "description": "Output width (pixels) of the composed Birdseye frame."
}, },
"height": { "height": {
"label": "Birdseye height", "label": "Height",
"description": "Output height (pixels) of the composed Birdseye frame." "description": "Output height (pixels) of the composed Birdseye frame."
}, },
"quality": { "quality": {
@ -30,14 +26,14 @@
"description": "Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest)." "description": "Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest)."
}, },
"inactivity_threshold": { "inactivity_threshold": {
"label": "Birdseye Inactivity Threshold", "label": "Inactivity threshold",
"description": "Seconds of inactivity after which a camera will stop being shown in Birdseye." "description": "Seconds of inactivity after which a camera will stop being shown in Birdseye."
}, },
"layout": { "layout": {
"label": "Birdseye Layout", "label": "Layout",
"description": "Layout options for the Birdseye composition.", "description": "Layout options for the Birdseye composition.",
"scaling_factor": { "scaling_factor": {
"label": "Birdseye Scaling Factor", "label": "Scaling factor",
"description": "Scaling factor used by the layout calculator (range 1.0 to 5.0)." "description": "Scaling factor used by the layout calculator (range 1.0 to 5.0)."
}, },
"max_cameras": { "max_cameras": {
@ -46,7 +42,11 @@
} }
}, },
"idle_heartbeat_fps": { "idle_heartbeat_fps": {
"label": "Idle heartbeat FPS (0 disables, max 10)", "label": "Idle heartbeat FPS",
"description": "Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable." "description": "Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable."
},
"order": {
"label": "Position",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
} }
} }

View File

@ -2,15 +2,15 @@
"label": "Camera groups", "label": "Camera groups",
"description": "Configuration for named camera groups used to organize cameras in the UI.", "description": "Configuration for named camera groups used to organize cameras in the UI.",
"cameras": { "cameras": {
"label": "List of cameras in this group", "label": "Camera list",
"description": "Array of camera names included in this group." "description": "Array of camera names included in this group."
}, },
"icon": { "icon": {
"label": "Icon that represents camera group", "label": "Group icon",
"description": "Icon used to represent the camera group in the UI." "description": "Icon used to represent the camera group in the UI."
}, },
"order": { "order": {
"label": "Sort order for group", "label": "Sort order",
"description": "Numeric order used to sort camera groups in the UI; larger numbers appear later." "description": "Numeric order used to sort camera groups in the UI; larger numbers appear later."
} }
} }

View File

@ -2,31 +2,31 @@
"label": "MQTT", "label": "MQTT",
"description": "MQTT image publishing settings.", "description": "MQTT image publishing settings.",
"enabled": { "enabled": {
"label": "Send image over MQTT", "label": "Send image",
"description": "Enable publishing image snapshots for objects to MQTT topics for this camera." "description": "Enable publishing image snapshots for objects to MQTT topics for this camera."
}, },
"timestamp": { "timestamp": {
"label": "Add timestamp to MQTT image", "label": "Add timestamp",
"description": "Overlay a timestamp on images published to MQTT." "description": "Overlay a timestamp on images published to MQTT."
}, },
"bounding_box": { "bounding_box": {
"label": "Add bounding box to MQTT image", "label": "Add bounding box",
"description": "Draw bounding boxes on images published over MQTT." "description": "Draw bounding boxes on images published over MQTT."
}, },
"crop": { "crop": {
"label": "Crop MQTT image to detected object", "label": "Crop image",
"description": "Crop images published to MQTT to the detected object's bounding box." "description": "Crop images published to MQTT to the detected object's bounding box."
}, },
"height": { "height": {
"label": "MQTT image height", "label": "Image height",
"description": "Height (pixels) to resize images published over MQTT." "description": "Height (pixels) to resize images published over MQTT."
}, },
"required_zones": { "required_zones": {
"label": "List of required zones to be entered in order to send the image", "label": "Required zones",
"description": "Zones that an object must enter for an MQTT image to be published." "description": "Zones that an object must enter for an MQTT image to be published."
}, },
"quality": { "quality": {
"label": "Quality of the encoded jpeg (0-100)", "label": "JPEG quality",
"description": "JPEG quality for images published to MQTT (0-100)." "description": "JPEG quality for images published to MQTT (0-100)."
} }
} }

View File

@ -2,11 +2,11 @@
"label": "Camera UI", "label": "Camera UI",
"description": "Display ordering and dashboard visibility for this camera in the UI.", "description": "Display ordering and dashboard visibility for this camera in the UI.",
"order": { "order": {
"label": "Order of camera in UI", "label": "UI order",
"description": "Numeric order used to sort the camera in the UI; larger numbers appear later." "description": "Numeric order used to sort the camera in the UI; larger numbers appear later."
}, },
"dashboard": { "dashboard": {
"label": "Show this camera in Frigate dashboard UI", "label": "Show in dashboard",
"description": "Toggle whether this camera is visible in the main dashboard." "description": "Toggle whether this camera is visible in the main dashboard."
} }
} }

View File

@ -0,0 +1,928 @@
{
"label": "Cameras",
"description": "Cameras",
"name": {
"label": "Camera name",
"description": "Camera name is required"
},
"friendly_name": {
"label": "Friendly name",
"description": "Camera friendly name used in the Frigate UI"
},
"enabled": {
"label": "Enabled",
"description": "Enabled"
},
"audio": {
"label": "Audio events",
"description": "Settings for audio-based event detection; can be overridden per-camera.",
"enabled": {
"label": "Enable audio",
"description": "Enable or disable audio event detection; can be overridden per-camera."
},
"max_not_heard": {
"label": "End timeout",
"description": "Amount of seconds without the configured audio type before the audio event is ended."
},
"min_volume": {
"label": "Minimum volume",
"description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)."
},
"listen": {
"label": "Listen types",
"description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)."
},
"filters": {
"label": "Audio filters",
"description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives."
},
"enabled_in_config": {
"label": "Original audio state",
"description": "Indicates whether audio detection was originally enabled in the static config file."
},
"num_threads": {
"label": "Detection threads",
"description": "Number of threads to use for audio detection processing."
}
},
"audio_transcription": {
"label": "Audio transcription",
"description": "Settings for live and speech audio transcription used for events and live captions.",
"enabled": {
"label": "Enable transcription",
"description": "Enable or disable automatic audio transcription."
},
"enabled_in_config": {
"label": "Original transcription state"
},
"live_enabled": {
"label": "Live transcription",
"description": "Enable streaming live transcription for audio as it is received."
}
},
"birdseye": {
"label": "Birdseye",
"description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
"enabled": {
"label": "Enable Birdseye",
"description": "Enable or disable the Birdseye view feature."
},
"mode": {
"label": "Tracking mode",
"description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'."
},
"order": {
"label": "Position",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
}
},
"detect": {
"label": "Object Detection",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": {
"label": "Detection enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
},
"height": {
"label": "Detect height",
"description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"width": {
"label": "Detect width",
"description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"fps": {
"label": "Detect FPS",
"description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)."
},
"min_initialized": {
"label": "Min initialization hits",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2."
},
"max_disappeared": {
"label": "Max disappeared frames",
"description": "Number of frames without a detection before a tracked object is considered gone."
},
"stationary": {
"label": "Stationary objects config",
"description": "Settings to detect and manage objects that remain stationary for a period of time.",
"interval": {
"label": "Stationary interval",
"description": "How often (in frames) to run a detection check to confirm a stationary object."
},
"threshold": {
"label": "Stationary threshold",
"description": "Number of frames with no position change required to mark an object as stationary."
},
"max_frames": {
"label": "Max frames",
"description": "Limits how long stationary objects are tracked before being discarded.",
"default": {
"label": "Default max frames",
"description": "Default maximum frames to track a stationary object before stopping."
},
"objects": {
"label": "Object max frames",
"description": "Per-object overrides for maximum frames to track stationary objects."
}
},
"classifier": {
"label": "Enable visual classifier",
"description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter."
}
},
"annotation_offset": {
"label": "Annotation offset",
"description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative."
}
},
"face_recognition": {
"label": "Face recognition",
"description": "Settings for face detection and recognition; can be overridden per-camera.",
"enabled": {
"label": "Enable face recognition",
"description": "Enable or disable face recognition globally."
},
"min_area": {
"label": "Minimum face area",
"description": "Minimum area (pixels) of a detected face box required to attempt recognition."
}
},
"ffmpeg": {
"label": "FFmpeg",
"description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
"path": {
"label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use globally or a version alias (\"5.0\" or \"7.0\")."
},
"global_args": {
"label": "FFmpeg global args",
"description": "Global args passed to FFmpeg processes by default."
},
"hwaccel_args": {
"label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for FFmpeg (auto or provider-specific)."
},
"input_args": {
"label": "Input args",
"description": "Input arguments applied to FFmpeg input streams by default."
},
"output_args": {
"label": "Output args",
"description": "Default output args used for different FFmpeg roles such as detect and record.",
"detect": {
"label": "Detect output args",
"description": "Default output args for detect role streams."
},
"record": {
"label": "Record output args",
"description": "Default output args for record role streams."
}
},
"retry_interval": {
"label": "FFmpeg retry time",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10."
},
"apple_compatibility": {
"label": "Apple compatibility",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index",
"description": "Default GPU index used for hardware acceleration if available."
},
"inputs": {
"label": "Camera inputs",
"description": "List of input stream definitions (paths and roles) for this camera.",
"path": {
"label": "Input path",
"description": "Camera input stream URL or path."
},
"roles": {
"label": "Input roles",
"description": "Roles for this input stream (for example: detect, record, audio)."
},
"global_args": {
"label": "FFmpeg args",
"description": "FFmpeg arguments for this input stream."
},
"hwaccel_args": {
"label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for this input stream."
},
"input_args": {
"label": "Input args",
"description": "Input arguments specific to this stream."
}
}
},
"live": {
"label": "Live playback",
"description": "Settings used by the Web UI to control live stream selection, resolution and quality.",
"streams": {
"label": "Live stream names",
"description": "Mapping of configured stream names to restream/go2rtc names used for live playback."
},
"height": {
"label": "Live height",
"description": "Height (pixels) to render the live stream in the Web UI; must be <= detect stream height."
},
"quality": {
"label": "Live quality",
"description": "Encoding quality for the live jsmpeg stream (1 highest, 31 lowest)."
}
},
"lpr": {
"label": "License Plate Recognition",
"description": "License plate recognition settings including detection thresholds, formatting, and known plates.",
"enabled": {
"label": "Enable LPR",
"description": "Enable or disable LPR globally; camera-level settings can override."
},
"expire_time": {
"label": "Expire seconds",
"description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)."
},
"min_area": {
"label": "Minimum plate area",
"description": "Minimum plate area (pixels) required to attempt recognition."
},
"enhancement": {
"label": "Enhancement level",
"description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution."
}
},
"motion": {
"label": "Motion detection",
"description": "Default motion detection settings; can be overridden per-camera.",
"enabled": {
"label": "Enable motion detection",
"description": "Enable or disable motion detection globally; per-camera settings can override this."
},
"threshold": {
"label": "Motion threshold",
"description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)."
},
"lightning_threshold": {
"label": "Lightning threshold",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)."
},
"improve_contrast": {
"label": "Improve contrast",
"description": "Apply contrast improvement to frames before motion analysis to help detection."
},
"contour_area": {
"label": "Contour area",
"description": "Minimum contour area in pixels required for a motion contour to be counted."
},
"delta_alpha": {
"label": "Delta alpha",
"description": "Alpha blending factor used in frame differencing for motion calculation."
},
"frame_alpha": {
"label": "Frame alpha",
"description": "Alpha value used when blending frames for motion preprocessing."
},
"frame_height": {
"label": "Frame height",
"description": "Height in pixels to scale frames to when computing motion (useful for performance)."
},
"mask": {
"label": "Mask coordinates",
"description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas."
},
"mqtt_off_delay": {
"label": "MQTT off delay",
"description": "Seconds to wait after last motion before publishing an MQTT 'off' state."
},
"enabled_in_config": {
"label": "Original motion state",
"description": "Indicates whether motion detection was enabled in the original static configuration."
},
"raw_mask": {
"label": "Raw Mask"
}
},
"objects": {
"label": "Objects",
"description": "Object tracking defaults including which labels to track and per-object filters.",
"track": {
"label": "Objects to track",
"description": "List of object labels to track globally; camera configs can override this."
},
"filters": {
"label": "Object filters",
"description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
"min_area": {
"label": "Minimum object area",
"description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum object area",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum aspect ratio",
"description": "Minimum width/height ratio required for the bounding box to qualify."
},
"max_ratio": {
"label": "Maximum aspect ratio",
"description": "Maximum width/height ratio allowed for the bounding box to qualify."
},
"threshold": {
"label": "Avg confidence",
"description": "Average detection confidence threshold required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum confidence",
"description": "Minimum single-frame detection confidence required for the object to be counted."
},
"mask": {
"label": "Filter mask",
"description": "Polygon coordinates defining where this filter applies within the frame."
},
"raw_mask": {
"label": "Raw Mask"
}
},
"mask": {
"label": "Object mask",
"description": "Mask polygon used to prevent object detection in specified areas."
},
"genai": {
"label": "GenAI object config",
"description": "GenAI options for describing tracked objects and sending frames for generation.",
"enabled": {
"label": "Enable GenAI",
"description": "Enable GenAI generation of descriptions for tracked objects by default."
},
"use_snapshot": {
"label": "Use snapshots",
"description": "Use object snapshots instead of thumbnails for GenAI description generation."
},
"prompt": {
"label": "Caption prompt",
"description": "Default prompt template used when generating descriptions with GenAI."
},
"object_prompts": {
"label": "Object prompts",
"description": "Per-object prompts to customize GenAI outputs for specific labels."
},
"objects": {
"label": "GenAI objects",
"description": "List of object labels to send to GenAI by default."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that must be entered for objects to qualify for GenAI description generation."
},
"debug_save_thumbnails": {
"label": "Save thumbnails",
"description": "Save thumbnails sent to GenAI for debugging and review."
},
"send_triggers": {
"label": "GenAI triggers",
"description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).",
"tracked_object_end": {
"label": "Send on end",
"description": "Send a request to GenAI when the tracked object ends."
},
"after_significant_updates": {
"label": "Early GenAI trigger",
"description": "Send a request to GenAI after a specified number of significant updates for the tracked object."
}
},
"enabled_in_config": {
"label": "Original GenAI state",
"description": "Indicates whether GenAI was enabled in the original static config."
}
}
},
"record": {
"label": "Recording",
"description": "Recording and retention settings; can be overridden per-camera.",
"enabled": {
"label": "Enable recording",
"description": "Enable or disable recording globally; individual cameras can override this."
},
"expire_interval": {
"label": "Record cleanup interval",
"description": "Minutes between cleanup passes that remove expired recording segments."
},
"continuous": {
"label": "Continuous retention",
"description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": {
"label": "Retention days",
"description": "Days to retain recordings."
}
},
"motion": {
"label": "Motion retention",
"description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": {
"label": "Retention days",
"description": "Days to retain recordings."
}
},
"detections": {
"label": "Detection retention",
"description": "Recording retention settings for detection events including pre/post capture durations.",
"pre_capture": {
"label": "Pre-capture seconds",
"description": "Number of seconds before the detection event to include in the recording."
},
"post_capture": {
"label": "Post-capture seconds",
"description": "Number of seconds after the detection event to include in the recording."
},
"retain": {
"label": "Event retention",
"description": "Retention settings for recordings of detection events.",
"days": {
"label": "Retention days",
"description": "Number of days to retain recordings of detection events."
},
"mode": {
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
"alerts": {
"label": "Alert retention",
"description": "Recording retention settings for alert events including pre/post capture durations.",
"pre_capture": {
"label": "Pre-capture seconds",
"description": "Number of seconds before the detection event to include in the recording."
},
"post_capture": {
"label": "Post-capture seconds",
"description": "Number of seconds after the detection event to include in the recording."
},
"retain": {
"label": "Event retention",
"description": "Retention settings for recordings of detection events.",
"days": {
"label": "Retention days",
"description": "Number of days to retain recordings of detection events."
},
"mode": {
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
"export": {
"label": "Export config",
"description": "Settings used when exporting recordings such as timelapse and hardware acceleration.",
"hwaccel_args": {
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
}
},
"preview": {
"label": "Preview config",
"description": "Settings controlling the quality of recording previews shown in the UI.",
"quality": {
"label": "Preview quality",
"description": "Preview quality level (very_low, low, medium, high, very_high)."
}
},
"enabled_in_config": {
"label": "Original recording state",
"description": "Indicates whether recording was enabled in the original static configuration."
}
},
"review": {
"label": "Review",
"description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage; can be overridden per-camera.",
"alerts": {
"label": "Alerts config",
"description": "Settings for which tracked objects generate alerts and how alerts are retained.",
"enabled": {
"label": "Enable alerts",
"description": "Enable or disable alert generation for this camera."
},
"labels": {
"label": "Alert labels",
"description": "List of object labels that qualify as alerts (for example: car, person)."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone."
},
"enabled_in_config": {
"label": "Original alerts state",
"description": "Tracks whether alerts were originally enabled in the static configuration."
},
"cutoff_time": {
"label": "Alerts cutoff time",
"description": "Seconds to wait after no alert-causing activity before cutting off an alert."
}
},
"detections": {
"label": "Detections config",
"description": "Settings for creating detection events (non-alert) and how long to keep them.",
"enabled": {
"label": "Enable detections",
"description": "Enable or disable detection events for this camera."
},
"labels": {
"label": "Detection labels",
"description": "List of object labels that qualify as detection events."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone."
},
"cutoff_time": {
"label": "Detections cutoff time",
"description": "Seconds to wait after no detection-causing activity before cutting off a detection."
},
"enabled_in_config": {
"label": "Original detections state",
"description": "Tracks whether detections were originally enabled in the static configuration."
}
},
"genai": {
"label": "GenAI config",
"description": "Controls use of generative AI for producing descriptions and summaries of review items.",
"enabled": {
"label": "Enable GenAI descriptions",
"description": "Enable or disable GenAI-generated descriptions and summaries for review items."
},
"alerts": {
"label": "Enable GenAI for alerts",
"description": "Use GenAI to generate descriptions for alert items."
},
"detections": {
"label": "Enable GenAI for detections",
"description": "Use GenAI to generate descriptions for detection items."
},
"image_source": {
"label": "Review image source",
"description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens."
},
"additional_concerns": {
"label": "Additional concerns",
"description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails",
"description": "Save thumbnails that are sent to the GenAI provider for debugging and review."
},
"enabled_in_config": {
"label": "Original GenAI state",
"description": "Tracks whether GenAI review was originally enabled in the static configuration."
},
"preferred_language": {
"label": "Preferred language",
"description": "Preferred language to request from the GenAI provider for generated responses."
},
"activity_context_prompt": {
"label": "Activity context prompt",
"description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries."
}
}
},
"semantic_search": {
"label": "Semantic Search",
"description": "Settings for semantic search which builds and queries object embeddings to find similar items.",
"triggers": {
"label": "Triggers",
"description": "Actions and matching criteria for camera-specific semantic search triggers.",
"friendly_name": {
"label": "Friendly name",
"description": "Optional friendly name displayed in the UI for this trigger."
},
"enabled": {
"label": "Enable this trigger",
"description": "Enable or disable this semantic search trigger."
},
"type": {
"label": "Trigger type",
"description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)."
},
"data": {
"label": "Trigger content",
"description": "Text phrase or thumbnail ID to match against tracked objects."
},
"threshold": {
"label": "Trigger threshold",
"description": "Minimum similarity score (0-1) required to activate this trigger."
},
"actions": {
"label": "Trigger actions",
"description": "List of actions to execute when trigger matches (notification, sub_label, attribute)."
}
}
},
"snapshots": {
"label": "Snapshots",
"description": "Settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
"enabled": {
"label": "Snapshots enabled",
"description": "Enable or disable saving snapshots globally."
},
"clean_copy": {
"label": "Save clean copy",
"description": "Save an unannotated clean copy of snapshots in addition to annotated ones."
},
"timestamp": {
"label": "Timestamp overlay",
"description": "Overlay a timestamp on saved snapshots."
},
"bounding_box": {
"label": "Bounding box overlay",
"description": "Draw bounding boxes for tracked objects on saved snapshots."
},
"crop": {
"label": "Crop snapshot",
"description": "Crop saved snapshots to the detected object's bounding box."
},
"required_zones": {
"label": "Required zones",
"description": "Zones an object must enter for a snapshot to be saved."
},
"height": {
"label": "Snapshot height",
"description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size."
},
"retain": {
"label": "Snapshot retention",
"description": "Retention settings for saved snapshots including default days and per-object overrides.",
"default": {
"label": "Default retention",
"description": "Default number of days to retain snapshots."
},
"mode": {
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
},
"objects": {
"label": "Object retention",
"description": "Per-object overrides for snapshot retention days."
}
},
"quality": {
"label": "JPEG quality",
"description": "JPEG encode quality for saved snapshots (0-100)."
}
},
"timestamp_style": {
"label": "Timestamp style",
"description": "Styling options for in-feed timestamps applied to recordings and snapshots.",
"position": {
"label": "Timestamp position",
"description": "Position of the timestamp on the image (tl/tr/bl/br)."
},
"format": {
"label": "Timestamp format",
"description": "Datetime format string used for timestamps (Python datetime format codes)."
},
"color": {
"label": "Timestamp color",
"description": "RGB color values for the timestamp text (all values 0-255).",
"red": {
"label": "Red",
"description": "Red component (0-255) for timestamp color."
},
"green": {
"label": "Green",
"description": "Green component (0-255) for timestamp color."
},
"blue": {
"label": "Blue",
"description": "Blue component (0-255) for timestamp color."
}
},
"thickness": {
"label": "Timestamp thickness",
"description": "Line thickness of the timestamp text."
},
"effect": {
"label": "Timestamp effect",
"description": "Visual effect for the timestamp text (none, solid, shadow)."
}
},
"best_image_timeout": {
"label": "Best image timeout",
"description": "How long to wait for the image with the highest confidence score."
},
"mqtt": {
"label": "MQTT",
"description": "MQTT image publishing settings.",
"enabled": {
"label": "Send image",
"description": "Enable publishing image snapshots for objects to MQTT topics for this camera."
},
"timestamp": {
"label": "Add timestamp",
"description": "Overlay a timestamp on images published to MQTT."
},
"bounding_box": {
"label": "Add bounding box",
"description": "Draw bounding boxes on images published over MQTT."
},
"crop": {
"label": "Crop image",
"description": "Crop images published to MQTT to the detected object's bounding box."
},
"height": {
"label": "Image height",
"description": "Height (pixels) to resize images published over MQTT."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that an object must enter for an MQTT image to be published."
},
"quality": {
"label": "JPEG quality",
"description": "JPEG quality for images published to MQTT (0-100)."
}
},
"notifications": {
"label": "Notifications",
"description": "Settings to enable and control notifications; can be overridden per-camera.",
"enabled": {
"label": "Enable notifications",
"description": "Enable or disable notifications globally."
},
"email": {
"label": "Notification email",
"description": "Email address used for push notifications or required by certain notification providers."
},
"cooldown": {
"label": "Cooldown period",
"description": "Cooldown (seconds) between notifications to avoid spamming recipients."
},
"enabled_in_config": {
"label": "Original notifications state",
"description": "Indicates whether notifications were enabled in the original static configuration."
}
},
"onvif": {
"label": "ONVIF",
"description": "ONVIF connection and PTZ autotracking settings for this camera.",
"host": {
"label": "ONVIF host",
"description": "Host (and optional scheme) for the ONVIF service for this camera."
},
"port": {
"label": "ONVIF port",
"description": "Port number for the ONVIF service."
},
"user": {
"label": "ONVIF username",
"description": "Username for ONVIF authentication; some devices require admin user for ONVIF."
},
"password": {
"label": "ONVIF password",
"description": "Password for ONVIF authentication."
},
"tls_insecure": {
"label": "Disable TLS verify",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
},
"autotracking": {
"label": "PTZ config",
"description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
"enabled": {
"label": "Enable Autotracking",
"description": "Enable or disable automatic PTZ camera tracking of detected objects."
},
"calibrate_on_startup": {
"label": "Calibrate on start",
"description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration."
},
"zooming": {
"label": "Zoom mode",
"description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)."
},
"zoom_factor": {
"label": "Zoom factor",
"description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75."
},
"track": {
"label": "Tracked objects",
"description": "List of object types that should trigger autotracking."
},
"required_zones": {
"label": "Required zones",
"description": "Objects must enter one of these zones before autotracking begins."
},
"return_preset": {
"label": "Return preset",
"description": "ONVIF preset name configured in camera firmware to return to after tracking ends."
},
"timeout": {
"label": "Return timeout",
"description": "Wait this many seconds after losing tracking before returning camera to preset position."
},
"movement_weights": {
"label": "Movement weights",
"description": "Calibration values automatically generated by camera calibration. Do not modify manually."
},
"enabled_in_config": {
"label": "Original autotrack state",
"description": "Internal field to track whether autotracking was enabled in configuration."
}
},
"ignore_time_mismatch": {
"label": "Ignore time mismatch",
"description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication."
}
},
"type": {
"label": "Camera type",
"description": "Camera Type"
},
"ui": {
"label": "Camera UI",
"description": "Display ordering and dashboard visibility for this camera in the UI.",
"order": {
"label": "UI order",
"description": "Numeric order used to sort the camera in the UI; larger numbers appear later."
},
"dashboard": {
"label": "Show in dashboard",
"description": "Toggle whether this camera is visible in the main dashboard."
}
},
"webui_url": {
"label": "Camera URL",
"description": "URL to visit the camera directly from system page"
},
"zones": {
"label": "Zones",
"description": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
"friendly_name": {
"label": "Zone name",
"description": "A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used."
},
"filters": {
"label": "Zone filters",
"description": "Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.",
"min_area": {
"label": "Minimum object area",
"description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum object area",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum aspect ratio",
"description": "Minimum width/height ratio required for the bounding box to qualify."
},
"max_ratio": {
"label": "Maximum aspect ratio",
"description": "Maximum width/height ratio allowed for the bounding box to qualify."
},
"threshold": {
"label": "Avg confidence",
"description": "Average detection confidence threshold required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum confidence",
"description": "Minimum single-frame detection confidence required for the object to be counted."
},
"mask": {
"label": "Filter mask",
"description": "Polygon coordinates defining where this filter applies within the frame."
},
"raw_mask": {
"label": "Raw Mask"
}
},
"coordinates": {
"label": "Coordinates",
"description": "Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy)."
},
"distances": {
"label": "Real-world distances",
"description": "Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set."
},
"inertia": {
"label": "Inertia frames",
"description": "Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections."
},
"loitering_time": {
"label": "Loitering seconds",
"description": "Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection."
},
"speed_threshold": {
"label": "Minimum speed",
"description": "Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers."
},
"objects": {
"label": "Trigger objects",
"description": "List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered."
}
},
"enabled_in_config": {
"label": "Original camera state",
"description": "Keep track of original state of camera."
}
}

View File

@ -5,11 +5,11 @@
"label": "Bird classification config", "label": "Bird classification config",
"description": "Settings specific to bird classification models.", "description": "Settings specific to bird classification models.",
"enabled": { "enabled": {
"label": "Enable bird classification", "label": "Bird classification",
"description": "Enable or disable bird classification." "description": "Enable or disable bird classification."
}, },
"threshold": { "threshold": {
"label": "Minimum classification score required to be considered a match", "label": "Minimum score",
"description": "Minimum classification score required to accept a bird classification." "description": "Minimum classification score required to accept a bird classification."
} }
}, },
@ -17,46 +17,46 @@
"label": "Custom Classification Models", "label": "Custom Classification Models",
"description": "Configuration for custom classification models used for objects or state detection.", "description": "Configuration for custom classification models used for objects or state detection.",
"enabled": { "enabled": {
"label": "Enable running the model", "label": "Enable model",
"description": "Enable or disable the custom classification model." "description": "Enable or disable the custom classification model."
}, },
"name": { "name": {
"label": "Name of classification model", "label": "Model name",
"description": "Identifier for the custom classification model to use." "description": "Identifier for the custom classification model to use."
}, },
"threshold": { "threshold": {
"label": "Classification score threshold to change the state", "label": "Score threshold",
"description": "Score threshold used to change the classification state." "description": "Score threshold used to change the classification state."
}, },
"save_attempts": { "save_attempts": {
"label": "Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification", "label": "Saved attempts",
"description": "How many classification attempts to save for recent classifications UI." "description": "How many classification attempts to save for recent classifications UI."
}, },
"object_config": { "object_config": {
"objects": { "objects": {
"label": "Object types to classify", "label": "Classify objects",
"description": "List of object types to run object classification on." "description": "List of object types to run object classification on."
}, },
"classification_type": { "classification_type": {
"label": "Type of classification that is applied", "label": "Classification type",
"description": "Classification type applied: 'sub_label' (adds sub_label) or other supported types." "description": "Classification type applied: 'sub_label' (adds sub_label) or other supported types."
} }
}, },
"state_config": { "state_config": {
"cameras": { "cameras": {
"label": "Cameras to run classification on", "label": "Classification cameras",
"description": "Per-camera crop and settings for running state classification.", "description": "Per-camera crop and settings for running state classification.",
"crop": { "crop": {
"label": "Crop of image frame on this camera to run classification on", "label": "Classification crop",
"description": "Crop coordinates to use for running classification on this camera." "description": "Crop coordinates to use for running classification on this camera."
} }
}, },
"motion": { "motion": {
"label": "If classification should be run when motion is detected in the crop", "label": "Run on motion",
"description": "If true, run classification when motion is detected within the specified crop." "description": "If true, run classification when motion is detected within the specified crop."
}, },
"interval": { "interval": {
"label": "Interval to run classification on in seconds", "label": "Classification interval",
"description": "Interval (seconds) between periodic classification runs for state classification." "description": "Interval (seconds) between periodic classification runs for state classification."
} }
} }

View File

@ -1,6 +1,6 @@
{ {
"label": "Database", "label": "Database",
"description": "Settings for the SQLite database used by Frigate to store events and metadata.", "description": "Settings for the SQLite database used by Frigate to store tracked object and recording metadata.",
"path": { "path": {
"label": "Database path", "label": "Database path",
"description": "Filesystem path where the Frigate SQLite database file will be stored." "description": "Filesystem path where the Frigate SQLite database file will be stored."

View File

@ -2,59 +2,59 @@
"label": "Object Detection", "label": "Object Detection",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.", "description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": { "enabled": {
"label": "Detection Enabled", "label": "Detection enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run." "description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
}, },
"height": { "height": {
"label": "Height of the stream for the detect role", "label": "Detect height",
"description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." "description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
}, },
"width": { "width": {
"label": "Width of the stream for the detect role", "label": "Detect width",
"description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution." "description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
}, },
"fps": { "fps": {
"label": "Number of frames per second to process through detection", "label": "Detect FPS",
"description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended ~5)." "description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)."
}, },
"min_initialized": { "min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker", "label": "Min initialization hits",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations." "description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2."
}, },
"max_disappeared": { "max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends", "label": "Max disappeared frames",
"description": "Number of frames without a detection before a tracked object is considered gone." "description": "Number of frames without a detection before a tracked object is considered gone."
}, },
"stationary": { "stationary": {
"label": "Stationary objects config", "label": "Stationary objects config",
"description": "Settings to detect and manage objects that remain stationary for a period of time.", "description": "Settings to detect and manage objects that remain stationary for a period of time.",
"interval": { "interval": {
"label": "Frame interval for checking stationary objects", "label": "Stationary interval",
"description": "How often (in frames) to run a detection check to confirm a stationary object." "description": "How often (in frames) to run a detection check to confirm a stationary object."
}, },
"threshold": { "threshold": {
"label": "Number of frames without a position change for an object to be considered stationary", "label": "Stationary threshold",
"description": "Number of frames with no position change required to mark an object as stationary." "description": "Number of frames with no position change required to mark an object as stationary."
}, },
"max_frames": { "max_frames": {
"label": "Max frames for stationary objects", "label": "Max frames",
"description": "Limits how long stationary objects are tracked before being discarded (override defaults to control retention).", "description": "Limits how long stationary objects are tracked before being discarded.",
"default": { "default": {
"label": "Default max frames", "label": "Default max frames",
"description": "Default maximum frames to track a stationary object before stopping." "description": "Default maximum frames to track a stationary object before stopping."
}, },
"objects": { "objects": {
"label": "Object specific max frames", "label": "Object max frames",
"description": "Per-object overrides for maximum frames to track stationary objects." "description": "Per-object overrides for maximum frames to track stationary objects."
} }
}, },
"classifier": { "classifier": {
"label": "Enable visual classifier for determing if objects with jittery bounding boxes are stationary", "label": "Enable visual classifier",
"description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter." "description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter."
} }
}, },
"annotation_offset": { "annotation_offset": {
"label": "Milliseconds to offset detect annotations by", "label": "Annotation offset",
"description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative." "description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative."
} }
} }

View File

@ -1,13 +1,53 @@
{ {
"label": "Detector hardware", "label": "Detector hardware",
"description": "Configuration for object detectors (CPU, EdgeTPU, GPU backends) and any detector-specific model settings.", "description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
"type": { "type": {
"label": "Detector Type", "label": "Detector Type",
"description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')." "description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')."
}, },
"model": { "model": {
"label": "Detector specific model configuration", "label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.)." "description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
"label": "Model Input D Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
}, },
"model_path": { "model_path": {
"label": "Detector specific model path", "label": "Detector specific model path",

View File

@ -1,4 +1,4 @@
{ {
"label": "Frigate environment variables", "label": "Environment variables",
"description": "Key/value pairs of environment variables to set for the Frigate process." "description": "Key/value pairs of environment variables to set for the Frigate process."
} }

View File

@ -1,44 +1,44 @@
{ {
"label": "Face recognition", "label": "Face recognition",
"description": "Global settings for face detection and recognition used across cameras unless overridden per-camera.", "description": "Settings for face detection and recognition; can be overridden per-camera.",
"enabled": { "enabled": {
"label": "Enable face recognition", "label": "Enable face recognition",
"description": "Enable or disable face recognition globally." "description": "Enable or disable face recognition globally."
}, },
"model_size": { "model_size": {
"label": "The size of the embeddings model used", "label": "Model size",
"description": "Model size to use for face embeddings (small/large); larger may require GPU." "description": "Model size to use for face embeddings (small/large); larger may require GPU."
}, },
"unknown_score": { "unknown_score": {
"label": "Minimum face distance score required to be marked as a potential match", "label": "Unknown score threshold",
"description": "Distance threshold below which a face is considered a potential match (lower = stricter)." "description": "Distance threshold below which a face is considered a potential match (higher = stricter)."
}, },
"detection_threshold": { "detection_threshold": {
"label": "Minimum face detection score required to be considered a face", "label": "Detection threshold",
"description": "Minimum detection confidence required to consider a face detection valid." "description": "Minimum detection confidence required to consider a face detection valid."
}, },
"recognition_threshold": { "recognition_threshold": {
"label": "Minimum face distance score required to be considered a match", "label": "Recognition threshold",
"description": "Face embedding distance threshold to consider two faces a match." "description": "Face embedding distance threshold to consider two faces a match."
}, },
"min_area": { "min_area": {
"label": "Min area of face box to consider running face recognition", "label": "Minimum face area",
"description": "Minimum area (pixels) of a detected face box required to attempt recognition." "description": "Minimum area (pixels) of a detected face box required to attempt recognition."
}, },
"min_faces": { "min_faces": {
"label": "Min face recognitions for the sub label to be applied to the person object", "label": "Minimum faces",
"description": "Minimum number of face recognitions required before applying a recognized sub-label to a person." "description": "Minimum number of face recognitions required before applying a recognized sub-label to a person."
}, },
"save_attempts": { "save_attempts": {
"label": "Number of face attempts to save in the recent recognitions tab", "label": "Saved attempts",
"description": "Number of face recognition attempts to retain for recent recognition UI." "description": "Number of face recognition attempts to retain for recent recognition UI."
}, },
"blur_confidence_filter": { "blur_confidence_filter": {
"label": "Apply blur quality filter to face confidence", "label": "Blur confidence filter",
"description": "Adjust confidence scores based on image blur to reduce false positives for poor quality faces." "description": "Adjust confidence scores based on image blur to reduce false positives for poor quality faces."
}, },
"device": { "device": {
"label": "The device key to use for face recognition", "label": "Device",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
} }
} }

View File

@ -3,66 +3,66 @@
"description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.", "description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
"path": { "path": {
"label": "FFmpeg path", "label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use globally or a version alias." "description": "Path to the FFmpeg binary to use globally or a version alias (\"5.0\" or \"7.0\")."
}, },
"global_args": { "global_args": {
"label": "FFmpeg arguments", "label": "FFmpeg global args",
"description": "args passed to FFmpeg processes by default." "description": "Global args passed to FFmpeg processes by default."
}, },
"hwaccel_args": { "hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments", "label": "Hardware acceleration args",
"description": "hardware acceleration arguments for FFmpeg (auto or provider-specific)." "description": "Hardware acceleration arguments for FFmpeg (auto or provider-specific)."
}, },
"input_args": { "input_args": {
"label": "FFmpeg input arguments", "label": "Input args",
"description": "input arguments applied to FFmpeg input streams by default." "description": "Input arguments applied to FFmpeg input streams by default."
}, },
"output_args": { "output_args": {
"label": "FFmpeg output arguments per role", "label": "Output args",
"description": "Default output args used for different FFmpeg roles such as detect and record.", "description": "Default output args used for different FFmpeg roles such as detect and record.",
"detect": { "detect": {
"label": "Detect role FFmpeg output arguments", "label": "Detect output args",
"description": "Default output args for detect role streams." "description": "Default output args for detect role streams."
}, },
"record": { "record": {
"label": "Record role FFmpeg output arguments", "label": "Record output args",
"description": "Default output args for record role streams." "description": "Default output args for record role streams."
} }
}, },
"retry_interval": {
"label": "FFmpeg retry time",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10."
},
"apple_compatibility": {
"label": "Apple compatibility",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index",
"description": "Default GPU index used for hardware acceleration if available."
},
"inputs": { "inputs": {
"label": "Camera inputs", "label": "Camera inputs",
"description": "List of input stream definitions (paths and roles) for this camera.", "description": "List of input stream definitions (paths and roles) for this camera.",
"path": { "path": {
"label": "Camera input path", "label": "Input path",
"description": "Camera input stream URL or path." "description": "Camera input stream URL or path."
}, },
"roles": { "roles": {
"label": "Roles assigned to this input", "label": "Input roles",
"description": "Roles for this input stream (for example: detect, record, audio)." "description": "Roles for this input stream (for example: detect, record, audio)."
}, },
"global_args": { "global_args": {
"label": "FFmpeg arguments", "label": "FFmpeg args",
"description": "FFmpeg arguments for this input stream." "description": "FFmpeg arguments for this input stream."
}, },
"hwaccel_args": { "hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments", "label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for this input stream." "description": "Hardware acceleration arguments for this input stream."
}, },
"input_args": { "input_args": {
"label": "FFmpeg input arguments", "label": "Input args",
"description": "Input arguments specific to this stream." "description": "Input arguments specific to this stream."
} }
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index to use for hardware acceleration",
"description": "Default GPU index used for hardware acceleration if available."
} }
} }

View File

@ -1,16 +1,16 @@
{ {
"label": "Live playback", "label": "Live playback",
"description": "Settings used by the Web UI to control live stream selection, resolution and quality.", "description": "Settings used by the Web UI to control live stream resolution and quality.",
"streams": { "streams": {
"label": "Friendly names and restream names to use for live view", "label": "Live stream names",
"description": "Mapping of configured stream names to restream/go2rtc names used for live playback." "description": "Mapping of configured stream names to restream/go2rtc names used for live playback."
}, },
"height": { "height": {
"label": "Live camera view height", "label": "Live height",
"description": "Height (pixels) to render the live stream in the Web UI; must be <= detect stream height." "description": "Height (pixels) to render the live stream in the Web UI; must be <= detect stream height."
}, },
"quality": { "quality": {
"label": "Live camera view quality", "label": "Live quality",
"description": "Encoding quality for the live jsmpeg stream (1 highest, 31 lowest)." "description": "Encoding quality for the live jsmpeg stream (1 highest, 31 lowest)."
} }
} }

View File

@ -2,11 +2,11 @@
"label": "Logging", "label": "Logging",
"description": "Controls default log verbosity and per-component log level overrides.", "description": "Controls default log verbosity and per-component log level overrides.",
"default": { "default": {
"label": "Default logging level", "label": "Logging level",
"description": "Default global log verbosity (debug, info, warning, error)." "description": "Default global log verbosity (debug, info, warning, error)."
}, },
"logs": { "logs": {
"label": "Log level for specified processes", "label": "Per-process log level",
"description": "Per-component log level overrides to increase or decrease verbosity for specific modules." "description": "Per-component log level overrides to increase or decrease verbosity for specific modules."
} }
} }

View File

@ -2,59 +2,65 @@
"label": "License Plate Recognition", "label": "License Plate Recognition",
"description": "License plate recognition settings including detection thresholds, formatting, and known plates.", "description": "License plate recognition settings including detection thresholds, formatting, and known plates.",
"enabled": { "enabled": {
"label": "Enable license plate recognition", "label": "Enable LPR",
"description": "Enable or disable LPR globally; camera-level settings can override." "description": "Enable or disable LPR globally; camera-level settings can override."
}, },
"model_size": { "model_size": {
"label": "The size of the embeddings model used", "label": "Model size",
"description": "Model size used for text detection/recognition; small runs on CPU, large on GPU." "description": "Model size used for text detection/recognition; small runs on CPU, large on GPU."
}, },
"detection_threshold": { "detection_threshold": {
"label": "License plate object confidence score required to begin running recognition", "label": "Detection threshold",
"description": "Detection confidence threshold to begin running OCR on a suspected plate." "description": "Detection confidence threshold to begin running OCR on a suspected plate."
}, },
"min_area": { "min_area": {
"label": "Minimum area of license plate to begin running recognition", "label": "Minimum plate area",
"description": "Minimum plate area (pixels) required to attempt recognition." "description": "Minimum plate area (pixels) required to attempt recognition."
}, },
"recognition_threshold": { "recognition_threshold": {
"label": "Recognition confidence score required to add the plate to the object as a sub label", "label": "Recognition threshold",
"description": "Confidence threshold required for recognized plate text to be attached as a sub-label." "description": "Confidence threshold required for recognized plate text to be attached as a sub-label."
}, },
"min_plate_length": { "min_plate_length": {
"label": "Minimum number of characters a license plate must have to be added to the object as a sub label", "label": "Min plate length",
"description": "Minimum number of characters a recognized plate must contain to be considered valid." "description": "Minimum number of characters a recognized plate must contain to be considered valid."
}, },
"format": { "format": {
"label": "Regular expression for the expected format of license plate", "label": "Plate format regex",
"description": "Optional regex to validate recognized plate strings against an expected format." "description": "Optional regex to validate recognized plate strings against an expected format."
}, },
"match_distance": { "match_distance": {
"label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate", "label": "Match distance",
"description": "Number of character mismatches allowed when comparing detected plates to known plates." "description": "Number of character mismatches allowed when comparing detected plates to known plates."
}, },
"known_plates": { "known_plates": {
"label": "Known plates to track (strings or regular expressions)", "label": "Known plates",
"description": "List of plates or regexes to specially track or alert on." "description": "List of plates or regexes to specially track or alert on."
}, },
"enhancement": { "enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition", "label": "Enhancement level",
"description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results." "description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution."
}, },
"debug_save_plates": { "debug_save_plates": {
"label": "Save plates captured for LPR for debugging purposes", "label": "Save debug plates",
"description": "Save plate crop images for debugging LPR performance." "description": "Save plate crop images for debugging LPR performance."
}, },
"device": { "device": {
"label": "The device key to use for LPR", "label": "Device",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}, },
"expire_time": {
"label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)",
"description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)."
},
"replace_rules": { "replace_rules": {
"label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'", "label": "Replacement rules",
"description": "Regex replacement rules used to normalize detected plate strings before matching." "description": "Regex replacement rules used to normalize detected plate strings before matching.",
"pattern": {
"label": "Regex pattern"
},
"replacement": {
"label": "Replacement string"
}
},
"expire_time": {
"label": "Expire seconds",
"description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)."
} }
} }

View File

@ -1,6 +1,6 @@
{ {
"label": "Detection model", "label": "Detection model",
"description": "Settings to configure a custom object detection model, its input shape, and labelmap overrides.", "description": "Settings to configure a custom object detection model and its input shape.",
"path": { "path": {
"label": "Custom Object detection model path", "label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)." "description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."

View File

@ -6,12 +6,12 @@
"description": "Enable or disable motion detection globally; per-camera settings can override this." "description": "Enable or disable motion detection globally; per-camera settings can override this."
}, },
"threshold": { "threshold": {
"label": "Motion detection threshold (1-255)", "label": "Motion threshold",
"description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity." "description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)."
}, },
"lightning_threshold": { "lightning_threshold": {
"label": "Lightning detection threshold (0.3-1.0)", "label": "Lightning threshold",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive)." "description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)."
}, },
"improve_contrast": { "improve_contrast": {
"label": "Improve contrast", "label": "Improve contrast",
@ -22,27 +22,30 @@
"description": "Minimum contour area in pixels required for a motion contour to be counted." "description": "Minimum contour area in pixels required for a motion contour to be counted."
}, },
"delta_alpha": { "delta_alpha": {
"label": "Delta Alpha", "label": "Delta alpha",
"description": "Alpha blending factor used in frame differencing for motion calculation." "description": "Alpha blending factor used in frame differencing for motion calculation."
}, },
"frame_alpha": { "frame_alpha": {
"label": "Frame Alpha", "label": "Frame alpha",
"description": "Alpha value used when blending frames for motion preprocessing." "description": "Alpha value used when blending frames for motion preprocessing."
}, },
"frame_height": { "frame_height": {
"label": "Frame Height", "label": "Frame height",
"description": "Height in pixels to scale frames to when computing motion (useful for performance)." "description": "Height in pixels to scale frames to when computing motion (useful for performance)."
}, },
"mask": { "mask": {
"label": "Coordinates polygon for the motion mask.", "label": "Mask coordinates",
"description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas." "description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas."
}, },
"mqtt_off_delay": { "mqtt_off_delay": {
"label": "Delay for updating MQTT with no motion detected", "label": "MQTT off delay",
"description": "Seconds to wait after last motion before publishing an MQTT 'off' state." "description": "Seconds to wait after last motion before publishing an MQTT 'off' state."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of motion detection", "label": "Original motion state",
"description": "Indicates whether motion detection was enabled in the original static configuration." "description": "Indicates whether motion detection was enabled in the original static configuration."
},
"raw_mask": {
"label": "Raw Mask"
} }
} }

View File

@ -1,52 +1,52 @@
{ {
"label": "MQTT", "label": "MQTT",
"description": "Settings for connecting and publishing telemetry, snapshots, and events to an MQTT broker.", "description": "Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.",
"enabled": { "enabled": {
"label": "Enable MQTT Communication", "label": "Enable MQTT",
"description": "Enable or disable MQTT integration for state, events, and snapshots." "description": "Enable or disable MQTT integration for state, events, and snapshots."
}, },
"host": { "host": {
"label": "MQTT Host", "label": "MQTT host",
"description": "Hostname or IP address of the MQTT broker." "description": "Hostname or IP address of the MQTT broker."
}, },
"port": { "port": {
"label": "MQTT Port", "label": "MQTT port",
"description": "Port of the MQTT broker (usually 1883 for plain MQTT)." "description": "Port of the MQTT broker (usually 1883 for plain MQTT)."
}, },
"topic_prefix": { "topic_prefix": {
"label": "MQTT Topic Prefix", "label": "Topic prefix",
"description": "MQTT topic prefix for all Frigate topics; must be unique if running multiple instances." "description": "MQTT topic prefix for all Frigate topics; must be unique if running multiple instances."
}, },
"client_id": { "client_id": {
"label": "MQTT Client ID", "label": "Client ID",
"description": "Client identifier used when connecting to the MQTT broker; should be unique per instance." "description": "Client identifier used when connecting to the MQTT broker; should be unique per instance."
}, },
"stats_interval": { "stats_interval": {
"label": "MQTT Camera Stats Interval", "label": "Stats interval",
"description": "Interval in seconds for publishing system and camera stats to MQTT." "description": "Interval in seconds for publishing system and camera stats to MQTT."
}, },
"user": { "user": {
"label": "MQTT Username", "label": "MQTT username",
"description": "Optional MQTT username; can be provided via environment variables or secrets." "description": "Optional MQTT username; can be provided via environment variables or secrets."
}, },
"password": { "password": {
"label": "MQTT Password", "label": "MQTT password",
"description": "Optional MQTT password; can be provided via environment variables or secrets." "description": "Optional MQTT password; can be provided via environment variables or secrets."
}, },
"tls_ca_certs": { "tls_ca_certs": {
"label": "MQTT TLS CA Certificates", "label": "TLS CA certs",
"description": "Path to CA certificate for TLS connections to the broker (for self-signed certs)." "description": "Path to CA certificate for TLS connections to the broker (for self-signed certs)."
}, },
"tls_client_cert": { "tls_client_cert": {
"label": "MQTT TLS Client Certificate", "label": "Client cert",
"description": "Client certificate path for TLS mutual authentication; do not set user/password when using client certs." "description": "Client certificate path for TLS mutual authentication; do not set user/password when using client certs."
}, },
"tls_client_key": { "tls_client_key": {
"label": "MQTT TLS Client Key", "label": "Client key",
"description": "Private key path for the client certificate." "description": "Private key path for the client certificate."
}, },
"tls_insecure": { "tls_insecure": {
"label": "MQTT TLS Insecure", "label": "TLS insecure",
"description": "Allow insecure TLS connections by skipping hostname verification (not recommended)." "description": "Allow insecure TLS connections by skipping hostname verification (not recommended)."
}, },
"qos": { "qos": {

View File

@ -2,10 +2,10 @@
"label": "Networking", "label": "Networking",
"description": "Network-related settings such as IPv6 enablement for Frigate endpoints.", "description": "Network-related settings such as IPv6 enablement for Frigate endpoints.",
"ipv6": { "ipv6": {
"label": "IPv6 IPv6 settings", "label": "IPv6 settings",
"description": "IPv6-specific settings for Frigate network services.", "description": "IPv6-specific settings for Frigate network services.",
"enabled": { "enabled": {
"label": "Enable IPv6 for port 5000 and/or 8971", "label": "Enable IPv6",
"description": "Enable IPv6 support for Frigate services (API and UI) where applicable." "description": "Enable IPv6 support for Frigate services (API and UI) where applicable."
}, },
"listen": { "listen": {

View File

@ -6,15 +6,15 @@
"description": "Enable or disable notifications globally." "description": "Enable or disable notifications globally."
}, },
"email": { "email": {
"label": "Email required for push", "label": "Notification email",
"description": "Email address used for push notifications or required by certain notification providers." "description": "Email address used for push notifications or required by certain notification providers."
}, },
"cooldown": { "cooldown": {
"label": "Cooldown period for notifications (time in seconds)", "label": "Cooldown period",
"description": "Cooldown (seconds) between notifications to avoid spamming recipients." "description": "Cooldown (seconds) between notifications to avoid spamming recipients."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of notifications", "label": "Original notifications state",
"description": "Indicates whether notifications were enabled in the original static configuration." "description": "Indicates whether notifications were enabled in the original static configuration."
} }
} }

View File

@ -9,32 +9,35 @@
"label": "Object filters", "label": "Object filters",
"description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).", "description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
"min_area": { "min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)", "label": "Minimum object area",
"description": "Minimum bounding box area (pixels or percentage) required for this object type." "description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
}, },
"max_area": { "max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)", "label": "Maximum object area",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type." "description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
}, },
"min_ratio": { "min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted", "label": "Minimum aspect ratio",
"description": "Minimum width/height ratio required for the bounding box to qualify." "description": "Minimum width/height ratio required for the bounding box to qualify."
}, },
"max_ratio": { "max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted", "label": "Maximum aspect ratio",
"description": "Maximum width/height ratio allowed for the bounding box to qualify." "description": "Maximum width/height ratio allowed for the bounding box to qualify."
}, },
"threshold": { "threshold": {
"label": "Average detection confidence threshold for object to be counted", "label": "Avg confidence",
"description": "Average detection confidence threshold required for the object to be considered a true positive." "description": "Average detection confidence threshold required for the object to be considered a true positive."
}, },
"min_score": { "min_score": {
"label": "Minimum detection confidence for object to be counted", "label": "Minimum confidence",
"description": "Minimum single-frame detection confidence required for the object to be counted." "description": "Minimum single-frame detection confidence required for the object to be counted."
}, },
"mask": { "mask": {
"label": "Detection area polygon mask for this filter configuration", "label": "Filter mask",
"description": "Polygon coordinates defining where this filter applies within the frame." "description": "Polygon coordinates defining where this filter applies within the frame."
},
"raw_mask": {
"label": "Raw Mask"
} }
}, },
"mask": { "mask": {
@ -42,50 +45,50 @@
"description": "Mask polygon used to prevent object detection in specified areas." "description": "Mask polygon used to prevent object detection in specified areas."
}, },
"genai": { "genai": {
"label": "Config for using genai to analyze objects", "label": "GenAI object config",
"description": "GenAI options for describing tracked objects and sending frames for generation.", "description": "GenAI options for describing tracked objects and sending frames for generation.",
"enabled": { "enabled": {
"label": "Enable GenAI for camera", "label": "Enable GenAI",
"description": "Enable GenAI generation of descriptions for tracked objects by default." "description": "Enable GenAI generation of descriptions for tracked objects by default."
}, },
"use_snapshot": { "use_snapshot": {
"label": "Use snapshots for generating descriptions", "label": "Use snapshots",
"description": "Use object snapshots instead of thumbnails for GenAI description generation." "description": "Use object snapshots instead of thumbnails for GenAI description generation."
}, },
"prompt": { "prompt": {
"label": "Default caption prompt", "label": "Caption prompt",
"description": "Default prompt template used when generating descriptions with GenAI." "description": "Default prompt template used when generating descriptions with GenAI."
}, },
"object_prompts": { "object_prompts": {
"label": "Object specific prompts", "label": "Object prompts",
"description": "Per-object prompts to customize GenAI outputs for specific labels." "description": "Per-object prompts to customize GenAI outputs for specific labels."
}, },
"objects": { "objects": {
"label": "List of objects to run generative AI for", "label": "GenAI objects",
"description": "List of object labels to send to GenAI by default." "description": "List of object labels to send to GenAI by default."
}, },
"required_zones": { "required_zones": {
"label": "List of required zones to be entered in order to run generative AI", "label": "Required zones",
"description": "Zones that must be entered for objects to qualify for GenAI description generation." "description": "Zones that must be entered for objects to qualify for GenAI description generation."
}, },
"debug_save_thumbnails": { "debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes", "label": "Save thumbnails",
"description": "Save thumbnails sent to GenAI for debugging and review." "description": "Save thumbnails sent to GenAI for debugging and review."
}, },
"send_triggers": { "send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object", "label": "GenAI triggers",
"description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).", "description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).",
"tracked_object_end": { "tracked_object_end": {
"label": "Send once the object is no longer tracked", "label": "Send on end",
"description": "Send a request to GenAI when the tracked object ends." "description": "Send a request to GenAI when the tracked object ends."
}, },
"after_significant_updates": { "after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated", "label": "Early GenAI trigger",
"description": "Send a request to GenAI after a specified number of significant updates for the tracked object." "description": "Send a request to GenAI after a specified number of significant updates for the tracked object."
} }
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of generative AI", "label": "Original GenAI state",
"description": "Indicates whether GenAI was enabled in the original static config." "description": "Indicates whether GenAI was enabled in the original static config."
} }
} }

View File

@ -2,71 +2,71 @@
"label": "ONVIF", "label": "ONVIF",
"description": "ONVIF connection and PTZ autotracking settings for this camera.", "description": "ONVIF connection and PTZ autotracking settings for this camera.",
"host": { "host": {
"label": "Onvif Host", "label": "ONVIF host",
"description": "Host (and optional scheme) for the ONVIF service for this camera." "description": "Host (and optional scheme) for the ONVIF service for this camera."
}, },
"port": { "port": {
"label": "Onvif Port", "label": "ONVIF port",
"description": "Port number for the ONVIF service." "description": "Port number for the ONVIF service."
}, },
"user": { "user": {
"label": "Onvif Username", "label": "ONVIF username",
"description": "Username for ONVIF authentication; some devices require admin user for ONVIF." "description": "Username for ONVIF authentication; some devices require admin user for ONVIF."
}, },
"password": { "password": {
"label": "Onvif Password", "label": "ONVIF password",
"description": "Password for ONVIF authentication." "description": "Password for ONVIF authentication."
}, },
"tls_insecure": { "tls_insecure": {
"label": "Onvif Disable TLS verification", "label": "Disable TLS verify",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)." "description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
}, },
"autotracking": { "autotracking": {
"label": "PTZ auto tracking config", "label": "PTZ config",
"description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.", "description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
"enabled": { "enabled": {
"label": "Enable PTZ object autotracking", "label": "Enable Autotracking",
"description": "Enable or disable automatic PTZ camera tracking of detected objects." "description": "Enable or disable automatic PTZ camera tracking of detected objects."
}, },
"calibrate_on_startup": { "calibrate_on_startup": {
"label": "Perform a camera calibration when Frigate starts", "label": "Calibrate on start",
"description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration." "description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration."
}, },
"zooming": { "zooming": {
"label": "Autotracker zooming mode", "label": "Zoom mode",
"description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)." "description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)."
}, },
"zoom_factor": { "zoom_factor": {
"label": "Zooming factor (0.1-0.75)", "label": "Zoom factor",
"description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking." "description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75."
}, },
"track": { "track": {
"label": "Objects to track", "label": "Tracked objects",
"description": "List of object types from labelmap.txt that should trigger autotracking." "description": "List of object types that should trigger autotracking."
}, },
"required_zones": { "required_zones": {
"label": "List of required zones to be entered in order to begin autotracking", "label": "Required zones",
"description": "Objects must enter one of these zones before autotracking begins." "description": "Objects must enter one of these zones before autotracking begins."
}, },
"return_preset": { "return_preset": {
"label": "Name of camera preset to return to when object tracking is over", "label": "Return preset",
"description": "ONVIF preset name configured in camera firmware to return to after tracking ends." "description": "ONVIF preset name configured in camera firmware to return to after tracking ends."
}, },
"timeout": { "timeout": {
"label": "Seconds to delay before returning to preset", "label": "Return timeout",
"description": "Wait this many seconds after losing tracking before returning camera to preset position." "description": "Wait this many seconds after losing tracking before returning camera to preset position."
}, },
"movement_weights": { "movement_weights": {
"label": "Internal value used for PTZ movements based on the speed of your camera's motor", "label": "Movement weights",
"description": "Calibration values automatically generated by camera calibration. Do not modify manually." "description": "Calibration values automatically generated by camera calibration. Do not modify manually."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of autotracking", "label": "Original autotrack state",
"description": "Internal field to track whether autotracking was enabled in configuration." "description": "Internal field to track whether autotracking was enabled in configuration."
} }
}, },
"ignore_time_mismatch": { "ignore_time_mismatch": {
"label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server", "label": "Ignore time mismatch",
"description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication." "description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication."
} }
} }

View File

@ -2,35 +2,35 @@
"label": "Proxy", "label": "Proxy",
"description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.", "description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
"header_map": { "header_map": {
"label": "Header mapping definitions for proxy user passing", "label": "Header mapping",
"description": "Map incoming proxy headers to Frigate user and role fields for proxy-based auth.", "description": "Map incoming proxy headers to Frigate user and role fields for proxy-based auth.",
"user": { "user": {
"label": "Header name from upstream proxy to identify user", "label": "User header",
"description": "Header containing the authenticated username provided by the upstream proxy." "description": "Header containing the authenticated username provided by the upstream proxy."
}, },
"role": { "role": {
"label": "Header name from upstream proxy to identify user role", "label": "Role header",
"description": "Header containing the authenticated user's role or groups from the upstream proxy." "description": "Header containing the authenticated user's role or groups from the upstream proxy."
}, },
"role_map": { "role_map": {
"label": "Mapping of Frigate roles to upstream group values. ", "label": "Role mapping",
"description": "Map upstream group values to Frigate roles (for example map admin groups to the admin role)." "description": "Map upstream group values to Frigate roles (for example map admin groups to the admin role)."
} }
}, },
"logout_url": { "logout_url": {
"label": "Redirect url for logging out with proxy", "label": "Logout URL",
"description": "URL to redirect users to when logging out via the proxy." "description": "URL to redirect users to when logging out via the proxy."
}, },
"auth_secret": { "auth_secret": {
"label": "Secret value for proxy authentication", "label": "Proxy secret",
"description": "Optional secret checked against the X-Proxy-Secret header to verify trusted proxies." "description": "Optional secret checked against the X-Proxy-Secret header to verify trusted proxies."
}, },
"default_role": { "default_role": {
"label": "Default role for proxy users", "label": "Default role",
"description": "Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer)." "description": "Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer)."
}, },
"separator": { "separator": {
"label": "The character used to separate values in a mapped header", "label": "Separator character",
"description": "Character used to split multiple values provided in proxy headers (for example a comma)." "description": "Character used to split multiple values provided in proxy headers."
} }
} }

View File

@ -2,99 +2,95 @@
"label": "Recording", "label": "Recording",
"description": "Recording and retention settings applied to cameras unless overridden per-camera.", "description": "Recording and retention settings applied to cameras unless overridden per-camera.",
"enabled": { "enabled": {
"label": "Enable record on all cameras", "label": "Enable recording",
"description": "Enable or disable recording globally; individual cameras can override this." "description": "Enable or disable recording globally; individual cameras can override this."
}, },
"expire_interval": { "expire_interval": {
"label": "Number of minutes to wait between cleanup runs", "label": "Record cleanup interval",
"description": "Minutes between cleanup passes that remove expired recording segments." "description": "Minutes between cleanup passes that remove expired recording segments."
}, },
"continuous": { "continuous": {
"label": "Continuous recording retention settings", "label": "Continuous retention",
"description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.", "description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": { "days": {
"label": "Default retention period", "label": "Retention days",
"description": "Days to retain continuous (always-on) recordings." "description": "Days to retain recordings."
} }
}, },
"motion": { "motion": {
"label": "Motion recording retention settings", "label": "Motion retention",
"description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.", "description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": { "days": {
"label": "Default retention period", "label": "Retention days",
"description": "Days to retain recordings triggered by motion." "description": "Days to retain recordings."
} }
}, },
"detections": { "detections": {
"label": "Detection specific retention settings", "label": "Detection retention",
"description": "Recording retention settings for detection events including pre/post capture durations.", "description": "Recording retention settings for detection events including pre/post capture durations.",
"pre_capture": { "pre_capture": {
"label": "Seconds to retain before event starts", "label": "Pre-capture seconds",
"description": "Number of seconds before the detection event to include in the recording." "description": "Number of seconds before the detection event to include in the recording."
}, },
"post_capture": { "post_capture": {
"label": "Seconds to retain after event ends", "label": "Post-capture seconds",
"description": "Number of seconds after the detection event to include in the recording." "description": "Number of seconds after the detection event to include in the recording."
}, },
"retain": { "retain": {
"label": "Event retention settings", "label": "Event retention",
"description": "Retention settings for recordings of detection events.", "description": "Retention settings for recordings of detection events.",
"days": { "days": {
"label": "Default retention period", "label": "Retention days",
"description": "Number of days to retain recordings of detection events." "description": "Number of days to retain recordings of detection events."
}, },
"mode": { "mode": {
"label": "Retain mode", "label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
} }
} }
}, },
"alerts": { "alerts": {
"label": "Alert specific retention settings", "label": "Alert retention",
"description": "Recording retention settings for alert events including pre/post capture durations.", "description": "Recording retention settings for alert events including pre/post capture durations.",
"pre_capture": { "pre_capture": {
"label": "Seconds to retain before event starts", "label": "Pre-capture seconds",
"description": "Number of seconds before the alert event to include in the recording." "description": "Number of seconds before the alert event to include in the recording."
}, },
"post_capture": { "post_capture": {
"label": "Seconds to retain after event ends", "label": "Post-capture seconds",
"description": "Number of seconds after the alert event to include in the recording." "description": "Number of seconds after the alert event to include in the recording."
}, },
"retain": { "retain": {
"label": "Event retention settings", "label": "Event retention",
"description": "Retention settings for recordings of alert events.", "description": "Retention settings for recordings of alert events.",
"days": { "days": {
"label": "Default retention period", "label": "Retention days",
"description": "Number of days to retain recordings of alert events." "description": "Number of days to retain recordings of alert events."
}, },
"mode": { "mode": {
"label": "Retain mode", "label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
} }
} }
}, },
"export": { "export": {
"label": "Recording Export Config", "label": "Export config",
"description": "Settings used when exporting recordings such as timelapse and hardware acceleration.", "description": "Settings used when exporting recordings such as timelapse and hardware acceleration.",
"timelapse_args": {
"label": "Timelapse output arguments",
"description": "FFmpeg arguments for timelapse exports. Default args fit 24 hours of recording into 1 hour playback (-vf setpts=0.04*PTS -r 30)."
},
"hwaccel_args": { "hwaccel_args": {
"label": "Export-specific FFmpeg hardware acceleration arguments", "label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations." "description": "Hardware acceleration args to use for export/transcode operations."
} }
}, },
"preview": { "preview": {
"label": "Recording Preview Config", "label": "Preview config",
"description": "Settings controlling the quality of recording previews shown in the UI.", "description": "Settings controlling the quality of recording previews shown in the UI.",
"quality": { "quality": {
"label": "Quality of recording preview", "label": "Preview quality",
"description": "Preview quality level (very_low, low, medium, high, very_high)." "description": "Preview quality level (very_low, low, medium, high, very_high)."
} }
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of recording", "label": "Original recording state",
"description": "Indicates whether recording was enabled in the original static configuration." "description": "Indicates whether recording was enabled in the original static configuration."
} }
} }

View File

@ -2,58 +2,58 @@
"label": "Review", "label": "Review",
"description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.", "description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
"alerts": { "alerts": {
"label": "Review alerts config", "label": "Alerts config",
"description": "Settings for which tracked objects generate alerts and how alerts are retained.", "description": "Settings for which tracked objects generate alerts and how alerts are retained.",
"enabled": { "enabled": {
"label": "Enable alerts", "label": "Enable alerts",
"description": "Enable or disable alert generation for this camera." "description": "Enable or disable alert generation for this camera."
}, },
"labels": { "labels": {
"label": "Labels to create alerts for", "label": "Alert labels",
"description": "List of object labels that qualify as alerts (for example: car, person)." "description": "List of object labels that qualify as alerts (for example: car, person)."
}, },
"required_zones": { "required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert", "label": "Required zones",
"description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone." "description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of alerts", "label": "Original alerts state",
"description": "Tracks whether alerts were originally enabled in the static configuration." "description": "Tracks whether alerts were originally enabled in the static configuration."
}, },
"cutoff_time": { "cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred", "label": "Alerts cutoff time",
"description": "Seconds to wait after no alert-causing activity before cutting off an alert." "description": "Seconds to wait after no alert-causing activity before cutting off an alert."
} }
}, },
"detections": { "detections": {
"label": "Review detections config", "label": "Detections config",
"description": "Settings for creating detection events (non-alert) and how long to keep them.", "description": "Settings for creating detection events (non-alert) and how long to keep them.",
"enabled": { "enabled": {
"label": "Enable detections", "label": "Enable detections",
"description": "Enable or disable detection events for this camera." "description": "Enable or disable detection events for this camera."
}, },
"labels": { "labels": {
"label": "Labels to create detections for", "label": "Detection labels",
"description": "List of object labels that qualify as detection events." "description": "List of object labels that qualify as detection events."
}, },
"required_zones": { "required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection", "label": "Required zones",
"description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone." "description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone."
}, },
"cutoff_time": { "cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred", "label": "Detections cutoff time",
"description": "Seconds to wait after no detection-causing activity before cutting off a detection." "description": "Seconds to wait after no detection-causing activity before cutting off a detection."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of detections", "label": "Original detections state",
"description": "Tracks whether detections were originally enabled in the static configuration." "description": "Tracks whether detections were originally enabled in the static configuration."
} }
}, },
"genai": { "genai": {
"label": "Review description genai config", "label": "GenAI config",
"description": "Controls use of generative AI for producing descriptions and summaries of review items.", "description": "Controls use of generative AI for producing descriptions and summaries of review items.",
"enabled": { "enabled": {
"label": "Enable GenAI descriptions for review items", "label": "Enable GenAI descriptions",
"description": "Enable or disable GenAI-generated descriptions and summaries for review items." "description": "Enable or disable GenAI-generated descriptions and summaries for review items."
}, },
"alerts": { "alerts": {
@ -65,27 +65,27 @@
"description": "Use GenAI to generate descriptions for detection items." "description": "Use GenAI to generate descriptions for detection items."
}, },
"image_source": { "image_source": {
"label": "Image source for review descriptions", "label": "Review image source",
"description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens." "description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens."
}, },
"additional_concerns": { "additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera", "label": "Additional concerns",
"description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera." "description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera."
}, },
"debug_save_thumbnails": { "debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes", "label": "Save thumbnails",
"description": "Save thumbnails that are sent to the GenAI provider for debugging and review." "description": "Save thumbnails that are sent to the GenAI provider for debugging and review."
}, },
"enabled_in_config": { "enabled_in_config": {
"label": "Keep track of original state of generative AI", "label": "Original GenAI state",
"description": "Tracks whether GenAI review was originally enabled in the static configuration." "description": "Tracks whether GenAI review was originally enabled in the static configuration."
}, },
"preferred_language": { "preferred_language": {
"label": "Preferred language for GenAI Response", "label": "Preferred language",
"description": "Preferred language to request from the GenAI provider for generated responses." "description": "Preferred language to request from the GenAI provider for generated responses."
}, },
"activity_context_prompt": { "activity_context_prompt": {
"label": "Custom activity context prompt defining normal and suspicious activity patterns for this property", "label": "Activity context prompt",
"description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries." "description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries."
} }
} }

View File

@ -1,4 +1,4 @@
{ {
"label": "If Frigate should be started in safe mode", "label": "Safe mode",
"description": "When enabled, start Frigate in safe mode with reduced features for troubleshooting." "description": "When enabled, start Frigate in safe mode with reduced features for troubleshooting."
} }

View File

@ -1,46 +1,52 @@
{ {
"label": "Semantic Search", "label": "Semantic Search",
"description": "Settings for semantic search which builds and queries object embeddings to find similar items.", "description": "Settings for Semantic Search which builds and queries object embeddings to find similar items.",
"enabled": { "enabled": {
"label": "Enable semantic search", "label": "Enable semantic search",
"description": "Enable or disable the semantic search feature." "description": "Enable or disable the semantic search feature."
}, },
"reindex": { "reindex": {
"label": "Reindex all tracked objects on startup", "label": "Reindex on startup",
"description": "Trigger a full reindex of historical tracked objects into the embeddings database." "description": "Trigger a full reindex of historical tracked objects into the embeddings database."
}, },
"model": { "model": {
"label": "The CLIP model to use for semantic search", "label": "Semantic search model",
"description": "The embeddings model to use for semantic search (for example 'jinav1')." "description": "The embeddings model to use for semantic search (for example 'jinav1')."
}, },
"model_size": { "model_size": {
"label": "The size of the embeddings model used", "label": "Model size",
"description": "Select model size; 'small' runs on CPU and 'large' typically requires GPU." "description": "Select model size; 'small' runs on CPU and 'large' typically requires GPU."
}, },
"device": { "device": {
"label": "The device key to use for semantic search", "label": "Device",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information" "description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}, },
"triggers": { "triggers": {
"label": "Trigger actions on tracked objects that match existing thumbnails or descriptions", "label": "Triggers",
"description": "Actions and matching criteria for camera-specific semantic search triggers.", "description": "Actions and matching criteria for camera-specific semantic search triggers.",
"friendly_name": { "friendly_name": {
"label": "Trigger friendly name used in the Frigate UI" "label": "Friendly name",
"description": "Optional friendly name displayed in the UI for this trigger."
}, },
"enabled": { "enabled": {
"label": "Enable this trigger" "label": "Enable this trigger",
"description": "Enable or disable this semantic search trigger."
}, },
"type": { "type": {
"label": "Type of trigger" "label": "Trigger type",
"description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)."
}, },
"data": { "data": {
"label": "Trigger content (text phrase or image ID)" "label": "Trigger content",
"description": "Text phrase or thumbnail ID to match against tracked objects."
}, },
"threshold": { "threshold": {
"label": "Confidence score required to run the trigger" "label": "Trigger threshold",
"description": "Minimum similarity score (0-1) required to activate this trigger."
}, },
"actions": { "actions": {
"label": "Actions to perform when trigger is matched" "label": "Trigger actions",
"description": "List of actions to execute when trigger matches (notification, sub_label, attribute)."
} }
} }
} }

View File

@ -6,47 +6,47 @@
"description": "Enable or disable saving snapshots globally." "description": "Enable or disable saving snapshots globally."
}, },
"clean_copy": { "clean_copy": {
"label": "Create a clean copy of the snapshot image", "label": "Save clean copy",
"description": "Save an unannotated clean copy of snapshots in addition to annotated ones." "description": "Save an unannotated clean copy of snapshots in addition to annotated ones."
}, },
"timestamp": { "timestamp": {
"label": "Add a timestamp overlay on the snapshot", "label": "Timestamp overlay",
"description": "Overlay a timestamp on saved snapshots." "description": "Overlay a timestamp on saved snapshots."
}, },
"bounding_box": { "bounding_box": {
"label": "Add a bounding box overlay on the snapshot", "label": "Bounding box overlay",
"description": "Draw bounding boxes for tracked objects on saved snapshots." "description": "Draw bounding boxes for tracked objects on saved snapshots."
}, },
"crop": { "crop": {
"label": "Crop the snapshot to the detected object", "label": "Crop snapshot",
"description": "Crop saved snapshots to the detected object's bounding box." "description": "Crop saved snapshots to the detected object's bounding box."
}, },
"required_zones": { "required_zones": {
"label": "List of required zones to be entered in order to save a snapshot", "label": "Required zones",
"description": "Zones an object must enter for a snapshot to be saved." "description": "Zones an object must enter for a snapshot to be saved."
}, },
"height": { "height": {
"label": "Snapshot image height", "label": "Snapshot height",
"description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size." "description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size."
}, },
"retain": { "retain": {
"label": "Snapshot retention", "label": "Snapshot retention",
"description": "Retention settings for saved snapshots including default days and per-object overrides.", "description": "Retention settings for saved snapshots including default days and per-object overrides.",
"default": { "default": {
"label": "Default retention period", "label": "Default retention",
"description": "Default number of days to retain snapshots." "description": "Default number of days to retain snapshots."
}, },
"mode": { "mode": {
"label": "Retain mode", "label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)." "description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}, },
"objects": { "objects": {
"label": "Object retention period", "label": "Object retention",
"description": "Per-object overrides for snapshot retention days." "description": "Per-object overrides for snapshot retention days."
} }
}, },
"quality": { "quality": {
"label": "Quality of the encoded jpeg (0-100)", "label": "JPEG quality",
"description": "JPEG encode quality for saved snapshots (0-100)." "description": "JPEG encode quality for saved snapshots (0-100)."
} }
} }

View File

@ -2,31 +2,31 @@
"label": "Telemetry", "label": "Telemetry",
"description": "System telemetry and stats options including GPU and network bandwidth monitoring.", "description": "System telemetry and stats options including GPU and network bandwidth monitoring.",
"network_interfaces": { "network_interfaces": {
"label": "Enabled network interfaces for bandwidth calculation", "label": "Network interfaces",
"description": "List of network interface name prefixes to monitor for bandwidth statistics." "description": "List of network interface name prefixes to monitor for bandwidth statistics."
}, },
"stats": { "stats": {
"label": "System Stats", "label": "System stats",
"description": "Options to enable/disable collection of various system and GPU statistics.", "description": "Options to enable/disable collection of various system and GPU statistics.",
"amd_gpu_stats": { "amd_gpu_stats": {
"label": "Enable AMD GPU stats", "label": "AMD GPU stats",
"description": "Enable collection of AMD GPU statistics if an AMD GPU is present." "description": "Enable collection of AMD GPU statistics if an AMD GPU is present."
}, },
"intel_gpu_stats": { "intel_gpu_stats": {
"label": "Enable Intel GPU stats", "label": "Intel GPU stats",
"description": "Enable collection of Intel GPU statistics if an Intel GPU is present." "description": "Enable collection of Intel GPU statistics if an Intel GPU is present."
}, },
"network_bandwidth": { "network_bandwidth": {
"label": "Enable network bandwidth for ffmpeg processes", "label": "Network bandwidth",
"description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)." "description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)."
}, },
"intel_gpu_device": { "intel_gpu_device": {
"label": "Define the device to use when gathering SR-IOV stats", "label": "SR-IOV device",
"description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats." "description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats."
} }
}, },
"version_check": { "version_check": {
"label": "Enable latest version check", "label": "Version check",
"description": "Enable an outbound check to detect if a newer Frigate version is available." "description": "Enable an outbound check to detect if a newer Frigate version is available."
} }
} }

View File

@ -2,7 +2,7 @@
"label": "TLS", "label": "TLS",
"description": "TLS settings for Frigate's web endpoints (port 8971).", "description": "TLS settings for Frigate's web endpoints (port 8971).",
"enabled": { "enabled": {
"label": "Enable TLS for port 8971", "label": "Enable TLS",
"description": "Enable TLS for Frigate's web UI and API on the configured TLS port." "description": "Enable TLS for Frigate's web UI and API on the configured TLS port."
} }
} }

View File

@ -2,23 +2,23 @@
"label": "UI", "label": "UI",
"description": "User interface preferences such as timezone, time/date formatting, and units.", "description": "User interface preferences such as timezone, time/date formatting, and units.",
"timezone": { "timezone": {
"label": "Override UI timezone", "label": "Timezone",
"description": "Optional timezone to display across the UI (defaults to browser local time if unset)." "description": "Optional timezone to display across the UI (defaults to browser local time if unset)."
}, },
"time_format": { "time_format": {
"label": "Override UI time format", "label": "Time format",
"description": "Time format to use in the UI (browser, 12hour, or 24hour)." "description": "Time format to use in the UI (browser, 12hour, or 24hour)."
}, },
"date_style": { "date_style": {
"label": "Override UI dateStyle", "label": "Date style",
"description": "Date style to use in the UI (full, long, medium, short)." "description": "Date style to use in the UI (full, long, medium, short)."
}, },
"time_style": { "time_style": {
"label": "Override UI timeStyle", "label": "Time style",
"description": "Time style to use in the UI (full, long, medium, short)." "description": "Time style to use in the UI (full, long, medium, short)."
}, },
"unit_system": { "unit_system": {
"label": "The unit system to use for measurements", "label": "Unit system",
"description": "Unit system for display (metric or imperial) used in the UI and MQTT." "description": "Unit system for display (metric or imperial) used in the UI and MQTT."
} }
} }