i18n tweaks

This commit is contained in:
Josh Hawkins 2026-01-27 17:10:29 -06:00
parent 0b148d66f3
commit bc5d7cf575
33 changed files with 53 additions and 879 deletions

View File

@ -1,6 +1,6 @@
{
"label": "Global Audio events configuration",
"description": "Global settings for audio-based event detection; camera-level settings can override these.",
"label": "Audio events",
"description": "Settings for audio-based event detection; camera-level settings can override these.",
"groups": {
"detection": "Detection",
"sensitivity": "Sensitivity"

View File

@ -1,5 +1,5 @@
{
"label": "Audio transcription config",
"label": "Audio transcription",
"description": "Settings for live and speech audio transcription used for events and live captions.",
"enabled": {
"label": "Enable audio transcription",

View File

@ -1,5 +1,5 @@
{
"label": "Auth configuration",
"label": "Authentication",
"description": "Authentication and session-related settings including cookie and rate limit options.",
"enabled": {
"label": "Enable authentication",

View File

@ -1,5 +1,5 @@
{
"label": "Birdseye configuration",
"label": "Birdseye",
"description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
"enabled": {
"label": "Enable birdseye view",
@ -34,7 +34,7 @@
"description": "Seconds of inactivity after which a camera will stop being shown in Birdseye."
},
"layout": {
"label": "Birdseye Layout Config",
"label": "Birdseye Layout",
"description": "Layout options for the Birdseye composition.",
"scaling_factor": {
"label": "Birdseye Scaling Factor",

View File

@ -1,5 +1,5 @@
{
"label": "Camera group configuration",
"label": "Camera groups",
"description": "Configuration for named camera groups used to organize cameras in the UI.",
"cameras": {
"label": "List of cameras in this group",

View File

@ -1,6 +1,6 @@
{
"label": "MQTT configuration",
"description": "Camera-specific MQTT image publishing settings.",
"label": "MQTT",
"description": "MQTT image publishing settings.",
"enabled": {
"label": "Send image over MQTT",
"description": "Enable publishing image snapshots for objects to MQTT topics for this camera."

View File

@ -1,5 +1,5 @@
{
"label": "Camera UI Modifications",
"label": "Camera UI",
"description": "Display ordering and dashboard visibility for this camera in the UI.",
"order": {
"label": "Order of camera in UI",

View File

@ -1,826 +0,0 @@
{
"label": "Camera configuration",
"description": "Per-camera settings that control streams, detection, recording, notifications, and integrations.",
"name": {
"label": "Camera name",
"description": "Unique identifier for the camera used in configuration and topics."
},
"friendly_name": {
"label": "Camera friendly name used in the Frigate UI",
"description": "Human-friendly display name shown in the Frigate UI."
},
"enabled": {
"label": "Enable camera",
"description": "Enable or disable this camera configuration; disabled cameras do not capture live streams."
},
"audio": {
"label": "Audio events configuration",
"description": "Camera-specific audio detection settings that override global audio options.",
"enabled": {
"label": "Enable audio events",
"description": "Enable or disable audio event detection for this camera. Can be overridden by global settings."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event",
"description": "Amount of seconds without the configured audio type before the audio event is ended."
},
"min_volume": {
"label": "Min volume required to run audio detection",
"description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity."
},
"listen": {
"label": "Audio to listen for",
"description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)."
},
"filters": {
"label": "Audio filters",
"description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection",
"description": "Indicates whether audio detection was enabled in the original static config for this camera."
},
"num_threads": {
"label": "Number of detection threads",
"description": "Number of worker threads to use for audio processing on this camera."
}
},
"audio_transcription": {
"label": "Audio transcription config",
"description": "Settings for live and speech audio transcription on this camera.",
"enabled": {
"label": "Enable audio transcription",
"description": "Enable live and speech transcription for audio events on this camera."
},
"enabled_in_config": {
"label": "Keep track of original state of audio transcription",
"description": "Indicates whether audio transcription was originally enabled in the static config."
},
"live_enabled": {
"label": "Enable live transcriptions",
"description": "Enable real-time live transcription for audio streams on this camera."
}
},
"birdseye": {
"label": "Birdseye camera configuration",
"description": "Per-camera Birdseye settings controlling inclusion and ordering in the composite view.",
"enabled": {
"label": "Enable birdseye view for camera",
"description": "Include this camera in the Birdseye composite view when enabled."
},
"mode": {
"label": "Tracking mode for camera",
"description": "Mode used when evaluating this camera for Birdseye inclusion (objects, motion, continuous)."
},
"order": {
"label": "Position of the camera in the birdseye view",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
}
},
"detect": {
"label": "Object detection configuration",
"description": "Settings for the detect role used to run object detection and initialize trackers for this camera.",
"enabled": {
"label": "Detection Enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for tracking to run."
},
"height": {
"label": "Height of the stream for the detect role",
"description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"width": {
"label": "Width of the stream for the detect role",
"description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"fps": {
"label": "Number of frames per second to process through detection",
"description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended ~5)."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends",
"description": "Number of frames without a detection before a tracked object is considered gone."
},
"stationary": {
"label": "Stationary objects config",
"description": "Settings to detect and manage objects that remain stationary for a period of time.",
"interval": {
"label": "Frame interval for checking stationary objects",
"description": "How often (in frames) to run a detection check to confirm a stationary object."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary",
"description": "Number of frames with no position change required to mark an object as stationary."
},
"max_frames": {
"label": "Max frames for stationary objects",
"description": "Limits how long stationary objects are tracked before being discarded (override defaults to control retention).",
"default": {
"label": "Default max frames",
"description": "Default maximum frames to track a stationary object before stopping."
},
"objects": {
"label": "Object specific max frames",
"description": "Per-object overrides for maximum frames to track stationary objects."
}
},
"classifier": {
"label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary",
"description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter."
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by",
"description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative."
}
},
"face_recognition": {
"label": "Face recognition config",
"description": "Face detection and recognition settings for this camera.",
"enabled": {
"label": "Enable face recognition",
"description": "Enable or disable face recognition processing for this camera."
},
"min_area": {
"label": "Min area of face box to consider running face recognition",
"description": "Minimum area (pixels) of a detected face box required to run recognition."
}
},
"ffmpeg": {
"label": "FFmpeg configuration for the camera",
"path": {
"label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use for this camera, or a version alias."
},
"global_args": {
"label": "Global FFmpeg arguments",
"description": "Global arguments passed to FFmpeg for this camera."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments",
"description": "Hardware acceleration arguments to enable GPU/video decoding for this camera."
},
"input_args": {
"label": "FFmpeg input arguments",
"description": "Input-specific FFmpeg args for connecting to the camera stream."
},
"output_args": {
"label": "FFmpeg output arguments per role",
"description": "Output args for FFmpeg by role (detect, record) to control pixel format, threads, and containers.",
"detect": {
"label": "Detect role FFmpeg output arguments",
"description": "FFmpeg output args used for the detect role."
},
"record": {
"label": "Record role FFmpeg output arguments",
"description": "FFmpeg output args used for the record role."
}
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera",
"description": "Seconds to wait before retrying a camera stream connection; higher values reduce retry churn."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players",
"description": "Enable tagging on H.265 record streams to improve playback compatibility on Apple devices."
},
"gpu": {
"label": "GPU index to use for hardware acceleration",
"description": "Index of the GPU to use for hardware acceleration for this camera."
},
"inputs": {
"label": "Camera inputs",
"description": "List of input stream definitions (paths and roles) for this camera."
}
},
"live": {
"label": "Live playback settings",
"description": "Live view configuration used by the Web UI for this camera.",
"streams": {
"label": "Friendly names and restream names to use for live view",
"description": "Mapping of stream roles to go2rtc/restream names for live playback."
},
"height": {
"label": "Live camera view height",
"description": "Height (pixels) of the jsmpeg live view; must be less than or equal to the detect stream height."
},
"quality": {
"label": "Live camera view quality",
"description": "Encoding quality for live view (1 highest, 31 lowest)."
}
},
"lpr": {
"label": "LPR config",
"description": "License plate recognition settings for this camera.",
"enabled": {
"label": "Enable license plate recognition",
"description": "Enable or disable automatic license plate detection/recognition for this camera."
},
"expire_time": {
"label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)",
"description": "Seconds after which unseen plates recorded by this LPR camera are expired."
},
"min_area": {
"label": "Minimum area of license plate to begin running recognition",
"description": "Minimum detected plate area (pixels) required to run recognition."
},
"enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition",
"description": "Level of image enhancement (0-10) applied to plate crops prior to recognition."
}
},
"motion": {
"label": "Motion detection configuration",
"enabled": {
"label": "Enable motion on all cameras"
},
"threshold": {
"label": "Motion detection threshold (1-255)"
},
"lightning_threshold": {
"label": "Lightning detection threshold (0.3-1.0)"
},
"improve_contrast": {
"label": "Improve Contrast"
},
"contour_area": {
"label": "Contour Area"
},
"delta_alpha": {
"label": "Delta Alpha"
},
"frame_alpha": {
"label": "Frame Alpha"
},
"frame_height": {
"label": "Frame Height"
},
"mask": {
"label": "Coordinates polygon for the motion mask"
},
"mqtt_off_delay": {
"label": "Delay for updating MQTT with no motion detected"
},
"enabled_in_config": {
"label": "Keep track of original state of motion detection"
}
},
"objects": {
"label": "Object configuration",
"track": {
"label": "Objects to track"
},
"filters": {
"label": "Object filters",
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Minimum area required for detections to be considered for this object filter. Can be pixels or percentage of the frame."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Maximum area allowed for detections to be considered by this object filter."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted",
"description": "Minimum width/height ratio for the bounding box to qualify for this object filter."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted",
"description": "Maximum width/height ratio for the bounding box to qualify for this object filter."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted",
"description": "Average detection confidence required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted",
"description": "Minimum single-detection confidence required for the object to be considered."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration",
"description": "Polygon coordinates specifying where this filter should be applied."
}
},
"mask": {
"label": "Object mask"
},
"genai": {
"label": "Config for using genai to analyze objects",
"description": "Settings that control when and how generative AI is used to describe tracked objects for this camera.",
"enabled": {
"label": "Enable GenAI for camera",
"description": "Enable GenAI generation of descriptions for tracked objects on this camera."
},
"use_snapshot": {
"label": "Use snapshots for generating descriptions",
"description": "Use full-size object snapshots (instead of thumbnails) when generating descriptions."
},
"prompt": {
"label": "Default caption prompt",
"description": "Default prompt template used when generating object descriptions with GenAI."
},
"object_prompts": {
"label": "Object specific prompts",
"description": "Per-object custom prompts to tailor GenAI descriptions for specific labels."
},
"objects": {
"label": "List of objects to run generative AI for",
"description": "List of object labels for which GenAI descriptions should be generated."
},
"required_zones": {
"label": "List of required zones to be entered in order to run generative AI",
"description": "Zones an object must enter to qualify for GenAI description generation."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes",
"description": "Save thumbnails that are sent to GenAI for debugging and review."
},
"send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object",
"description": "Which events or conditions should cause frames to be sent to the GenAI provider for a tracked object.",
"tracked_object_end": {
"label": "Send once the object is no longer tracked",
"description": "Send a description request when the tracked object ends."
},
"after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated",
"description": "Send a GenAI request after a specified number of significant updates for the tracked object."
}
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI",
"description": "Indicates whether GenAI was enabled for this camera in the original static config."
}
}
},
"record": {
"label": "Record configuration",
"description": "Recording settings including retention, pre/post capture, previews and export options for this camera.",
"enabled": {
"label": "Enable record on all cameras",
"description": "Enable or disable recording for this camera. If disabled, recording cannot be turned on from the UI."
},
"expire_interval": {
"label": "Number of minutes to wait between cleanup runs",
"description": "Interval in minutes between cleanup runs that remove expired recording segments."
},
"continuous": {
"label": "Continuous recording retention settings",
"days": {
"label": "Default retention period"
}
},
"motion": {
"label": "Motion recording retention settings",
"days": {
"label": "Default retention period"
}
},
"detections": {
"label": "Detection specific retention settings",
"pre_capture": {
"label": "Seconds to retain before event starts"
},
"post_capture": {
"label": "Seconds to retain after event ends"
},
"retain": {
"label": "Event retention settings",
"days": {
"label": "Default retention period"
},
"mode": {
"label": "Retain mode"
}
}
},
"alerts": {
"label": "Alert specific retention settings",
"pre_capture": {
"label": "Seconds to retain before event starts"
},
"post_capture": {
"label": "Seconds to retain after event ends"
},
"retain": {
"label": "Event retention settings",
"days": {
"label": "Default retention period"
},
"mode": {
"label": "Retain mode"
}
}
},
"export": {
"label": "Recording Export Config",
"hwaccel_args": {
"label": "Export-specific FFmpeg hardware acceleration arguments"
}
},
"preview": {
"label": "Recording Preview Config",
"quality": {
"label": "Quality of recording preview"
}
},
"enabled_in_config": {
"label": "Keep track of original state of recording",
"description": "Indicates whether recording was enabled in the original static config for this camera."
}
},
"review": {
"label": "Review configuration",
"description": "Per-camera settings that control alerts, detections, and GenAI-assisted review summaries.",
"alerts": {
"label": "Review alerts config",
"description": "Settings that determine which events are saved as alerts for this camera.",
"enabled": {
"label": "Enable alerts",
"description": "Enable or disable alert generation for this camera."
},
"labels": {
"label": "Labels to create alerts for",
"description": "List of labels that should be considered alert-worthy for this camera."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert",
"description": "Zones that an object must enter to be considered an alert for this camera."
},
"enabled_in_config": {
"label": "Keep track of original state of alerts",
"description": "Indicates whether alerts were originally enabled in the static config for this camera."
},
"cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred",
"description": "Seconds to wait after no alert-causing activity before cutting off an alert."
}
},
"detections": {
"label": "Review detections config",
"description": "Settings for non-alert detections and how they are retained for this camera.",
"enabled": {
"label": "Enable detections",
"description": "Enable or disable detection events for this camera."
},
"labels": {
"label": "Labels to create detections for",
"description": "List of labels that should be recorded as detections for this camera."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection",
"description": "Zones that an object must enter to be considered a detection for this camera."
},
"cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred",
"description": "Seconds to wait after no detection-causing activity before cutting off a detection."
},
"enabled_in_config": {
"label": "Keep track of original state of detections",
"description": "Indicates whether detections were originally enabled in the static config for this camera."
}
},
"genai": {
"label": "Review description genai config",
"description": "Controls use of generative AI for producing descriptions and summaries of review items for this camera.",
"enabled": {
"label": "Enable GenAI descriptions for review items",
"description": "Enable or disable GenAI-generated descriptions and summaries for review items."
},
"alerts": {
"label": "Enable GenAI for alerts",
"description": "Use GenAI to generate descriptions for alert items on this camera."
},
"detections": {
"label": "Enable GenAI for detections",
"description": "Use GenAI to generate descriptions for detection items on this camera."
},
"image_source": {
"label": "Image source for review descriptions",
"description": "Source of images sent to GenAI ('preview' or 'recordings'); recordings use higher quality frames but more tokens."
},
"additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera",
"description": "Extra context or concerns that should be included when generating GenAI summaries."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes",
"description": "Save thumbnails that are sent to GenAI for debugging and review."
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI",
"description": "Indicates whether GenAI review was originally enabled in the static config for this camera."
},
"preferred_language": {
"label": "Preferred language for GenAI Response",
"description": "Preferred language to request from the GenAI provider for generated responses."
},
"activity_context_prompt": {
"label": "Custom activity context prompt defining normal and suspicious activity patterns for this property",
"description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries."
}
}
},
"semantic_search": {
"label": "Semantic search configuration",
"description": "Per-camera semantic search trigger configuration to run actions when matches are found.",
"triggers": {
"label": "Trigger actions on tracked objects that match existing thumbnails or descriptions",
"description": "Actions and matching criteria for camera-specific semantic search triggers.",
"friendly_name": {
"label": "Trigger friendly name used in the Frigate UI"
},
"enabled": {
"label": "Enable this trigger"
},
"type": {
"label": "Type of trigger"
},
"data": {
"label": "Trigger content (text phrase or image ID)"
},
"threshold": {
"label": "Confidence score required to run the trigger"
},
"actions": {
"label": "Actions to perform when trigger is matched"
}
}
},
"snapshots": {
"label": "Snapshot configuration",
"enabled": {
"label": "Snapshots enabled",
"description": "Enable writing JPEG snapshots for tracked objects for this camera."
},
"clean_copy": {
"label": "Create a clean copy of the snapshot image",
"description": "Save an unannotated copy of each snapshot in addition to any annotated versions."
},
"timestamp": {
"label": "Add a timestamp overlay on the snapshot",
"description": "Overlay a timestamp on saved snapshots for easier review."
},
"bounding_box": {
"label": "Add a bounding box overlay on the snapshot",
"description": "Draw a bounding box on saved snapshots around the tracked object."
},
"crop": {
"label": "Crop the snapshot to the detected object",
"description": "Save snapshots cropped to the detected object's bounding box."
},
"required_zones": {
"label": "List of required zones to be entered in order to save a snapshot",
"description": "Zones that an object must enter to cause a snapshot to be saved for this camera."
},
"height": {
"label": "Snapshot image height",
"description": "Height (pixels) to resize saved snapshots to; leave empty to keep original size."
},
"retain": {
"label": "Snapshot retention",
"default": {
"label": "Default retention period"
},
"mode": {
"label": "Retain mode"
},
"objects": {
"label": "Object retention period"
}
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)"
}
},
"timestamp_style": {
"label": "Timestamp style configuration",
"description": "Style and formatting options for in-feed timestamps applied to recordings and snapshots.",
"position": {
"label": "Timestamp position"
},
"format": {
"label": "Timestamp format"
},
"color": {
"label": "Timestamp color",
"red": {
"label": "Red"
},
"green": {
"label": "Green"
},
"blue": {
"label": "Blue"
}
},
"thickness": {
"label": "Timestamp thickness"
},
"effect": {
"label": "Timestamp effect"
}
},
"best_image_timeout": {
"label": "How long to wait for the image with the highest confidence score",
"description": "Time in seconds to wait before allowing a newer image to replace the highest-confidence image for a tracked object."
},
"mqtt": {
"label": "MQTT configuration",
"enabled": {
"label": "Send image over MQTT",
"description": "Enable publishing image snapshots for objects to MQTT topics for this camera."
},
"timestamp": {
"label": "Add timestamp to MQTT image",
"description": "Overlay a timestamp on images published to MQTT."
},
"bounding_box": {
"label": "Add bounding box to MQTT image",
"description": "Draw bounding boxes on images published over MQTT."
},
"crop": {
"label": "Crop MQTT image to detected object",
"description": "Crop images published to MQTT to the detected object's bounding box."
},
"height": {
"label": "MQTT image height",
"description": "Height (pixels) to resize images published over MQTT."
},
"required_zones": {
"label": "List of required zones to be entered in order to send the image",
"description": "Zones that an object must enter for an MQTT image to be published."
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)",
"description": "JPEG quality for images published to MQTT (0-100)."
}
},
"notifications": {
"label": "Notifications configuration",
"enabled": {
"label": "Enable notifications",
"description": "Enable or disable notifications (push/email) for this camera."
},
"email": {
"label": "Email required for push",
"description": "Email address used for push notification delivery if required."
},
"cooldown": {
"label": "Cooldown period for notifications (time in seconds)",
"description": "Cooldown period (seconds) between notifications to avoid spamming."
},
"enabled_in_config": {
"label": "Keep track of original state of notifications",
"description": "Indicates whether notifications were enabled in the original static config for this camera."
}
},
"onvif": {
"label": "Camera Onvif Configuration",
"host": {
"label": "Onvif Host",
"description": "Host (and optional scheme) for the ONVIF service for this camera."
},
"port": {
"label": "Onvif Port",
"description": "Port number for the ONVIF service."
},
"user": {
"label": "Onvif Username",
"description": "Username for ONVIF authentication; some devices require admin user for ONVIF."
},
"password": {
"label": "Onvif Password",
"description": "Password for ONVIF authentication."
},
"tls_insecure": {
"label": "Onvif Disable TLS verification",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
},
"autotracking": {
"label": "PTZ auto tracking config",
"enabled": {
"label": "Enable PTZ object autotracking"
},
"calibrate_on_startup": {
"label": "Perform a camera calibration when Frigate starts"
},
"zooming": {
"label": "Autotracker zooming mode"
},
"zoom_factor": {
"label": "Zooming factor (0.1-0.75)"
},
"track": {
"label": "Objects to track"
},
"required_zones": {
"label": "List of required zones to be entered in order to begin autotracking"
},
"return_preset": {
"label": "Name of camera preset to return to when object tracking is over"
},
"timeout": {
"label": "Seconds to delay before returning to preset"
},
"movement_weights": {
"label": "Internal value used for PTZ movements based on the speed of your camera's motor"
},
"enabled_in_config": {
"label": "Keep track of original state of autotracking"
}
},
"ignore_time_mismatch": {
"label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server"
}
},
"type": {
"label": "Camera Type",
"description": "Type of camera for Frigate features (for example 'generic' or 'lpr')."
},
"ui": {
"label": "Camera UI Modifications",
"order": {
"label": "Order of camera in UI",
"description": "Numeric order used to sort the camera in the UI; larger numbers appear later."
},
"dashboard": {
"label": "Show this camera in Frigate dashboard UI",
"description": "Toggle whether this camera is visible in the main dashboard."
}
},
"webui_url": {
"label": "URL to visit the camera directly from system page",
"description": "Optional URL that links directly to the camera's native web UI from the system page."
},
"zones": {
"label": "Zone configuration",
"friendly_name": {
"label": "Zone friendly name used in the Frigate UI",
"description": "Human-friendly name for the zone displayed in the UI."
},
"filters": {
"label": "Zone filters",
"description": "Per-zone filters that further restrict which detections qualify inside this zone.",
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Minimum area for detections inside the zone to be considered (pixels or percentage)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Maximum area for detections inside the zone to be considered."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted",
"description": "Minimum width/height ratio for detections inside the zone to be considered."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted",
"description": "Maximum width/height ratio for detections inside the zone to be considered."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted",
"description": "Average confidence threshold for detections inside the zone to be considered."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted",
"description": "Minimum single detection confidence required for objects inside the zone."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration",
"description": "Polygon coordinates to further limit where this zone filter applies."
}
},
"coordinates": {
"label": "Coordinates polygon for the defined zone",
"description": "Ordered x,y coordinates defining the zone polygon used for zone membership checks."
},
"distances": {
"label": "Real-world distances for the sides of quadrilateral for the defined zone",
"description": "Real-world distances corresponding to the zone corners used for speed estimation."
},
"inertia": {
"label": "Number of consecutive frames required for object to be considered present in the zone",
"description": "Number of consecutive frames an object must be present to be considered in the zone."
},
"loitering_time": {
"label": "Number of seconds that an object must loiter to be considered in the zone",
"description": "Number of seconds an object must remain in the zone to be considered loitering."
},
"speed_threshold": {
"label": "Minimum speed value for an object to be considered in the zone",
"description": "Minimum estimated speed required for an object to count for speed-based zone logic."
},
"objects": {
"label": "List of objects that can trigger the zone",
"description": "List of object labels allowed to trigger zone-related events."
}
},
"enabled_in_config": {
"label": "Keep track of original state of camera",
"description": "Indicates whether this camera was enabled in the original static configuration."
}
}

View File

@ -1,5 +1,5 @@
{
"label": "Object classification config",
"label": "Object classification",
"description": "Settings for classification models used to refine object labels or state classification.",
"bird": {
"label": "Bird classification config",
@ -14,7 +14,7 @@
}
},
"custom": {
"label": "Custom Classification Model Configs",
"label": "Custom Classification Models",
"description": "Configuration for custom classification models used for objects or state detection.",
"enabled": {
"label": "Enable running the model",

View File

@ -1,5 +1,5 @@
{
"label": "Database configuration",
"label": "Database",
"description": "Settings for the SQLite database used by Frigate to store events and metadata.",
"path": {
"label": "Database path",

View File

@ -1,5 +1,5 @@
{
"label": "Detector hardware configuration",
"label": "Detector hardware",
"description": "Configuration for object detectors (CPU, EdgeTPU, GPU backends) and any detector-specific model settings.",
"type": {
"label": "Detector Type",

View File

@ -1,5 +1,5 @@
{
"label": "Face recognition config",
"label": "Face recognition",
"description": "Global settings for face detection and recognition used across cameras unless overridden per-camera.",
"enabled": {
"label": "Enable face recognition",

View File

@ -1,21 +1,21 @@
{
"label": "Global FFmpeg configuration",
"description": "Global FFmpeg settings including binary path, global args, hwaccel options, and per-role output args.",
"label": "FFmpeg",
"description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
"path": {
"label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use globally or a version alias."
},
"global_args": {
"label": "Global FFmpeg arguments",
"description": "Global args passed to FFmpeg processes by default."
"label": "FFmpeg arguments",
"description": "Arguments passed to FFmpeg processes by default."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments",
"description": "Global hardware acceleration arguments for FFmpeg (auto or provider-specific)."
"description": "Hardware acceleration arguments for FFmpeg (auto or provider-specific)."
},
"input_args": {
"label": "FFmpeg input arguments",
"description": "Global input arguments applied to FFmpeg input streams by default."
"description": "Input arguments applied to FFmpeg input streams by default."
},
"output_args": {
"label": "FFmpeg output arguments per role",
@ -41,8 +41,8 @@
"description": "Roles for this input stream (for example: detect, record, audio)."
},
"global_args": {
"label": "FFmpeg global arguments",
"description": "Global FFmpeg arguments for this input stream."
"label": "FFmpeg arguments",
"description": "FFmpeg arguments for this input stream."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments",

View File

@ -1,4 +1,4 @@
{
"label": "Global restream configuration",
"label": "go2rtc",
"description": "Settings for the integrated go2rtc restreaming service used for live stream relaying and translation."
}

View File

@ -1,5 +1,5 @@
{
"label": "Live playback settings",
"label": "Live playback",
"description": "Settings used by the Web UI to control live stream selection, resolution and quality.",
"streams": {
"label": "Friendly names and restream names to use for live view",

View File

@ -1,5 +1,5 @@
{
"label": "Logging configuration",
"label": "Logging",
"description": "Controls default log verbosity and per-component log level overrides.",
"default": {
"label": "Default logging level",

View File

@ -1,6 +1,6 @@
{
"label": "License Plate recognition config",
"description": "Global license plate recognition settings including detection thresholds, formatting, and known plates.",
"label": "License Plate Recognition",
"description": "License plate recognition settings including detection thresholds, formatting, and known plates.",
"enabled": {
"label": "Enable license plate recognition",
"description": "Enable or disable LPR globally; camera-level settings can override."

View File

@ -1,5 +1,5 @@
{
"label": "Detection model configuration",
"label": "Detection model",
"description": "Settings to configure a custom object detection model, its input shape, and labelmap overrides.",
"path": {
"label": "Custom Object detection model path",

View File

@ -1,5 +1,5 @@
{
"label": "Global motion detection configuration",
"label": "Motion detection",
"description": "Default motion detection settings applied to cameras unless overridden per-camera.",
"groups": {
"sensitivity": "Sensitivity",

View File

@ -1,5 +1,5 @@
{
"label": "MQTT configuration",
"label": "MQTT",
"description": "Settings for connecting and publishing telemetry, snapshots, and events to an MQTT broker.",
"enabled": {
"label": "Enable MQTT Communication",

View File

@ -1,8 +1,8 @@
{
"label": "Networking configuration",
"label": "Networking",
"description": "Network-related settings such as IPv6 enablement for Frigate endpoints.",
"ipv6": {
"label": "IPv6 configuration",
"label": "IPv6",
"description": "IPv6-specific settings for Frigate network services.",
"enabled": {
"label": "Enable IPv6 for port 5000 and/or 8971",

View File

@ -1,6 +1,6 @@
{
"label": "Global notification configuration",
"description": "Global settings to enable and control notifications; can be overridden per-camera.",
"label": "Notifications",
"description": "Settings to enable and control notifications; can be overridden per-camera.",
"enabled": {
"label": "Enable notifications",
"description": "Enable or disable notifications globally."

View File

@ -1,6 +1,6 @@
{
"label": "Global object configuration",
"description": "Global object tracking defaults including which labels to track and per-object filters.",
"label": "Objects",
"description": "Object tracking defaults including which labels to track and per-object filters.",
"groups": {
"tracking": "Tracking",
"filtering": "Filtering"
@ -43,11 +43,11 @@
},
"mask": {
"label": "Object mask",
"description": "Global mask polygon used to prevent object detection in specified areas."
"description": "Mask polygon used to prevent object detection in specified areas."
},
"genai": {
"label": "Config for using genai to analyze objects",
"description": "Global GenAI options for describing tracked objects and sending frames for generation.",
"description": "GenAI options for describing tracked objects and sending frames for generation.",
"enabled": {
"label": "Enable GenAI for camera",
"description": "Enable GenAI generation of descriptions for tracked objects by default."

View File

@ -1,5 +1,5 @@
{
"label": "Camera Onvif Configuration",
"label": "ONVIF",
"description": "ONVIF connection and PTZ autotracking settings for this camera.",
"host": {
"label": "Onvif Host",

View File

@ -1,5 +1,5 @@
{
"label": "Proxy configuration",
"label": "Proxy",
"description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
"header_map": {
"label": "Header mapping definitions for proxy user passing",

View File

@ -1,6 +1,6 @@
{
"label": "Global record configuration",
"description": "Global recording and retention settings applied to cameras unless overridden per-camera.",
"label": "Recording",
"description": "Recording and retention settings applied to cameras unless overridden per-camera.",
"groups": {
"retention": "Retention",
"events": "Events"

View File

@ -1,5 +1,5 @@
{
"label": "Review configuration",
"label": "Review",
"description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
"alerts": {
"label": "Review alerts config",

View File

@ -1,5 +1,5 @@
{
"label": "Semantic search configuration",
"label": "Semantic Search",
"description": "Settings for semantic search which builds and queries object embeddings to find similar items.",
"enabled": {
"label": "Enable semantic search",

View File

@ -1,6 +1,6 @@
{
"label": "Global snapshots configuration",
"description": "Global settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
"label": "Snapshots",
"description": "Settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
"groups": {
"display": "Display"
},

View File

@ -1,12 +1,12 @@
{
"label": "Telemetry configuration",
"label": "Telemetry",
"description": "System telemetry and stats options including GPU and network bandwidth monitoring.",
"network_interfaces": {
"label": "Enabled network interfaces for bandwidth calculation",
"description": "List of network interface name prefixes to monitor for bandwidth statistics."
},
"stats": {
"label": "System Stats Configuration",
"label": "System Stats",
"description": "Options to enable/disable collection of various system and GPU statistics.",
"amd_gpu_stats": {
"label": "Enable AMD GPU stats",

View File

@ -1,6 +1,6 @@
{
"label": "Global timestamp style configuration",
"description": "Global styling options for in-feed timestamps applied to recordings and snapshots.",
"label": "Timestamp style",
"description": "Styling options for in-feed timestamps applied to recordings and snapshots.",
"groups": {
"appearance": "Appearance"
},

View File

@ -1,5 +1,5 @@
{
"label": "TLS configuration",
"label": "TLS",
"description": "TLS settings for Frigate's web endpoints (port 8971).",
"enabled": {
"label": "Enable TLS for port 8971",

View File

@ -1,5 +1,5 @@
{
"label": "UI configuration",
"label": "UI",
"description": "User interface preferences such as timezone, time/date formatting, and units.",
"timezone": {
"label": "Override UI timezone",