new titles as i18n

This commit is contained in:
Josh Hawkins 2026-01-29 09:43:43 -06:00
parent 13b9d1b6e3
commit 30db891837
34 changed files with 1288 additions and 306 deletions

View File

@ -1,20 +1,20 @@
{
"label": "Audio events",
"description": "settings for audio-based event detection; camera-level settings can override these.",
"description": "Settings for audio-based event detection; can be overridden per-camera.",
"enabled": {
"label": "Enable audio events",
"description": "Enable or disable audio event detection globally. Can be overridden per camera."
"label": "Enable audio",
"description": "Enable or disable audio event detection; can be overridden per-camera."
},
"max_not_heard": {
"label": "Seconds of not hearing the type of audio to end the event",
"label": "End timeout",
"description": "Amount of seconds without the configured audio type before the audio event is ended."
},
"min_volume": {
"label": "Min volume required to run audio detection",
"label": "Minimum volume",
"description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)."
},
"listen": {
"label": "Audio to listen for",
"label": "Listen types",
"description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)."
},
"filters": {
@ -22,11 +22,11 @@
"description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives."
},
"enabled_in_config": {
"label": "Keep track of original state of audio detection",
"label": "Original audio state",
"description": "Indicates whether audio detection was originally enabled in the static config file."
},
"num_threads": {
"label": "Number of detection threads",
"label": "Detection threads",
"description": "Number of threads to use for audio detection processing."
}
}

View File

@ -6,19 +6,19 @@
"description": "Enable or disable automatic audio transcription globally."
},
"language": {
"label": "Language abbreviation to use for audio event transcription/translation",
"label": "Transcription language",
"description": "Language code used for transcription/translation (for example 'en' for English)."
},
"device": {
"label": "The device used for audio transcription",
"label": "Transcription device",
"description": "Device key (CPU/GPU) to run the transcription model on."
},
"model_size": {
"label": "The size of the embeddings model used",
"description": "Model size to use for transcription; smaller models run on CPU, larger models may need GPU."
"label": "Model size",
"description": "Model size to use for transcription; the small model runs on CPU, large model requires a GPU."
},
"live_enabled": {
"label": "Enable live transcriptions",
"label": "Live transcription",
"description": "Enable streaming live transcription for audio as it is received."
}
}

View File

@ -6,43 +6,43 @@
"description": "Enable native authentication for the Frigate UI."
},
"reset_admin_password": {
"label": "Reset the admin password on startup",
"label": "Reset admin password",
"description": "If true, reset the admin user's password on startup and print the new password in logs."
},
"cookie_name": {
"label": "Name for jwt token cookie",
"label": "JWT cookie name",
"description": "Name of the cookie used to store the JWT token for native authentication."
},
"cookie_secure": {
"label": "Set secure flag on cookie",
"label": "Secure cookie flag",
"description": "Set the secure flag on the auth cookie; should be true when using TLS."
},
"session_length": {
"label": "Session length for jwt session tokens",
"label": "Session length",
"description": "Session duration in seconds for JWT-based sessions."
},
"refresh_time": {
"label": "Refresh the session if it is going to expire in this many seconds",
"label": "Session refresh window",
"description": "When a session is within this many seconds of expiring, refresh it back to full length."
},
"failed_login_rate_limit": {
"label": "Rate limits for failed login attempts",
"label": "Failed login limits",
"description": "Rate limiting rules for failed login attempts to reduce brute-force attacks."
},
"trusted_proxies": {
"label": "Trusted proxies for determining IP address to rate limit",
"label": "Trusted proxies",
"description": "List of trusted proxy IPs used when determining client IP for rate limiting."
},
"hash_iterations": {
"label": "Password hash iterations",
"label": "Hash iterations",
"description": "Number of PBKDF2-SHA256 iterations to use when hashing user passwords."
},
"roles": {
"label": "Role to camera mappings. Empty list grants access to all cameras",
"label": "Role mappings",
"description": "Map roles to camera lists. An empty list grants access to all cameras for the role."
},
"admin_first_time_login": {
"label": "Internal field to expose first-time admin login flag to the UI",
"label": "First-time admin flag",
"description": "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. "
}
}

View File

@ -2,27 +2,23 @@
"label": "Birdseye",
"description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
"enabled": {
"label": "Enable birdseye view",
"label": "Enable Birdseye",
"description": "Enable or disable the Birdseye view feature."
},
"mode": {
"label": "Tracking mode",
"description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'."
},
"order": {
"label": "Position of the camera in the birdseye view",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
},
"restream": {
"label": "Restream birdseye via RTSP",
"label": "Restream RTSP",
"description": "Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously."
},
"width": {
"label": "Birdseye width",
"label": "Width",
"description": "Output width (pixels) of the composed Birdseye frame."
},
"height": {
"label": "Birdseye height",
"label": "Height",
"description": "Output height (pixels) of the composed Birdseye frame."
},
"quality": {
@ -30,14 +26,14 @@
"description": "Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest)."
},
"inactivity_threshold": {
"label": "Birdseye Inactivity Threshold",
"label": "Inactivity threshold",
"description": "Seconds of inactivity after which a camera will stop being shown in Birdseye."
},
"layout": {
"label": "Birdseye Layout",
"label": "Layout",
"description": "Layout options for the Birdseye composition.",
"scaling_factor": {
"label": "Birdseye Scaling Factor",
"label": "Scaling factor",
"description": "Scaling factor used by the layout calculator (range 1.0 to 5.0)."
},
"max_cameras": {
@ -46,7 +42,11 @@
}
},
"idle_heartbeat_fps": {
"label": "Idle heartbeat FPS (0 disables, max 10)",
"label": "Idle heartbeat FPS",
"description": "Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable."
},
"order": {
"label": "Position",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
}
}

View File

@ -2,15 +2,15 @@
"label": "Camera groups",
"description": "Configuration for named camera groups used to organize cameras in the UI.",
"cameras": {
"label": "List of cameras in this group",
"label": "Camera list",
"description": "Array of camera names included in this group."
},
"icon": {
"label": "Icon that represents camera group",
"label": "Group icon",
"description": "Icon used to represent the camera group in the UI."
},
"order": {
"label": "Sort order for group",
"label": "Sort order",
"description": "Numeric order used to sort camera groups in the UI; larger numbers appear later."
}
}

View File

@ -2,31 +2,31 @@
"label": "MQTT",
"description": "MQTT image publishing settings.",
"enabled": {
"label": "Send image over MQTT",
"label": "Send image",
"description": "Enable publishing image snapshots for objects to MQTT topics for this camera."
},
"timestamp": {
"label": "Add timestamp to MQTT image",
"label": "Add timestamp",
"description": "Overlay a timestamp on images published to MQTT."
},
"bounding_box": {
"label": "Add bounding box to MQTT image",
"label": "Add bounding box",
"description": "Draw bounding boxes on images published over MQTT."
},
"crop": {
"label": "Crop MQTT image to detected object",
"label": "Crop image",
"description": "Crop images published to MQTT to the detected object's bounding box."
},
"height": {
"label": "MQTT image height",
"label": "Image height",
"description": "Height (pixels) to resize images published over MQTT."
},
"required_zones": {
"label": "List of required zones to be entered in order to send the image",
"label": "Required zones",
"description": "Zones that an object must enter for an MQTT image to be published."
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)",
"label": "JPEG quality",
"description": "JPEG quality for images published to MQTT (0-100)."
}
}

View File

@ -2,11 +2,11 @@
"label": "Camera UI",
"description": "Display ordering and dashboard visibility for this camera in the UI.",
"order": {
"label": "Order of camera in UI",
"label": "UI order",
"description": "Numeric order used to sort the camera in the UI; larger numbers appear later."
},
"dashboard": {
"label": "Show this camera in Frigate dashboard UI",
"label": "Show in dashboard",
"description": "Toggle whether this camera is visible in the main dashboard."
}
}

View File

@ -0,0 +1,928 @@
{
"label": "Cameras",
"description": "Cameras",
"name": {
"label": "Camera name",
"description": "Camera name is required"
},
"friendly_name": {
"label": "Friendly name",
"description": "Camera friendly name used in the Frigate UI"
},
"enabled": {
"label": "Enabled",
"description": "Enabled"
},
"audio": {
"label": "Audio events",
"description": "Settings for audio-based event detection; can be overridden per-camera.",
"enabled": {
"label": "Enable audio",
"description": "Enable or disable audio event detection; can be overridden per-camera."
},
"max_not_heard": {
"label": "End timeout",
"description": "Amount of seconds without the configured audio type before the audio event is ended."
},
"min_volume": {
"label": "Minimum volume",
"description": "Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low)."
},
"listen": {
"label": "Listen types",
"description": "List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell)."
},
"filters": {
"label": "Audio filters",
"description": "Per-audio-type filter settings such as confidence thresholds used to reduce false positives."
},
"enabled_in_config": {
"label": "Original audio state",
"description": "Indicates whether audio detection was originally enabled in the static config file."
},
"num_threads": {
"label": "Detection threads",
"description": "Number of threads to use for audio detection processing."
}
},
"audio_transcription": {
"label": "Audio transcription",
"description": "Settings for live and speech audio transcription used for events and live captions.",
"enabled": {
"label": "Enable transcription",
"description": "Enable or disable automatic audio transcription."
},
"enabled_in_config": {
"label": "Original transcription state"
},
"live_enabled": {
"label": "Live transcription",
"description": "Enable streaming live transcription for audio as it is received."
}
},
"birdseye": {
"label": "Birdseye",
"description": "Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
"enabled": {
"label": "Enable Birdseye",
"description": "Enable or disable the Birdseye view feature."
},
"mode": {
"label": "Tracking mode",
"description": "Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'."
},
"order": {
"label": "Position",
"description": "Numeric position controlling the camera's ordering in the Birdseye layout."
}
},
"detect": {
"label": "Object Detection",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": {
"label": "Detection enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
},
"height": {
"label": "Detect height",
"description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"width": {
"label": "Detect width",
"description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"fps": {
"label": "Detect FPS",
"description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects)."
},
"min_initialized": {
"label": "Min initialization hits",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2."
},
"max_disappeared": {
"label": "Max disappeared frames",
"description": "Number of frames without a detection before a tracked object is considered gone."
},
"stationary": {
"label": "Stationary objects config",
"description": "Settings to detect and manage objects that remain stationary for a period of time.",
"interval": {
"label": "Stationary interval",
"description": "How often (in frames) to run a detection check to confirm a stationary object."
},
"threshold": {
"label": "Stationary threshold",
"description": "Number of frames with no position change required to mark an object as stationary."
},
"max_frames": {
"label": "Max frames",
"description": "Limits how long stationary objects are tracked before being discarded.",
"default": {
"label": "Default max frames",
"description": "Default maximum frames to track a stationary object before stopping."
},
"objects": {
"label": "Object max frames",
"description": "Per-object overrides for maximum frames to track stationary objects."
}
},
"classifier": {
"label": "Enable visual classifier",
"description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter."
}
},
"annotation_offset": {
"label": "Annotation offset",
"description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative."
}
},
"face_recognition": {
"label": "Face recognition",
"description": "Settings for face detection and recognition; can be overridden per-camera.",
"enabled": {
"label": "Enable face recognition",
"description": "Enable or disable face recognition globally."
},
"min_area": {
"label": "Minimum face area",
"description": "Minimum area (pixels) of a detected face box required to attempt recognition."
}
},
"ffmpeg": {
"label": "FFmpeg",
"description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
"path": {
"label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use globally or a version alias (\"5.0\" or \"7.0\")."
},
"global_args": {
"label": "FFmpeg global args",
"description": "Global args passed to FFmpeg processes by default."
},
"hwaccel_args": {
"label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for FFmpeg (auto or provider-specific)."
},
"input_args": {
"label": "Input args",
"description": "Input arguments applied to FFmpeg input streams by default."
},
"output_args": {
"label": "Output args",
"description": "Default output args used for different FFmpeg roles such as detect and record.",
"detect": {
"label": "Detect output args",
"description": "Default output args for detect role streams."
},
"record": {
"label": "Record output args",
"description": "Default output args for record role streams."
}
},
"retry_interval": {
"label": "FFmpeg retry time",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10."
},
"apple_compatibility": {
"label": "Apple compatibility",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index",
"description": "Default GPU index used for hardware acceleration if available."
},
"inputs": {
"label": "Camera inputs",
"description": "List of input stream definitions (paths and roles) for this camera.",
"path": {
"label": "Input path",
"description": "Camera input stream URL or path."
},
"roles": {
"label": "Input roles",
"description": "Roles for this input stream (for example: detect, record, audio)."
},
"global_args": {
"label": "FFmpeg args",
"description": "FFmpeg arguments for this input stream."
},
"hwaccel_args": {
"label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for this input stream."
},
"input_args": {
"label": "Input args",
"description": "Input arguments specific to this stream."
}
}
},
"live": {
"label": "Live playback",
"description": "Settings used by the Web UI to control live stream selection, resolution and quality.",
"streams": {
"label": "Live stream names",
"description": "Mapping of configured stream names to restream/go2rtc names used for live playback."
},
"height": {
"label": "Live height",
"description": "Height (pixels) to render the live stream in the Web UI; must be <= detect stream height."
},
"quality": {
"label": "Live quality",
"description": "Encoding quality for the live jsmpeg stream (1 highest, 31 lowest)."
}
},
"lpr": {
"label": "License Plate Recognition",
"description": "License plate recognition settings including detection thresholds, formatting, and known plates.",
"enabled": {
"label": "Enable LPR",
"description": "Enable or disable LPR globally; camera-level settings can override."
},
"expire_time": {
"label": "Expire seconds",
"description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)."
},
"min_area": {
"label": "Minimum plate area",
"description": "Minimum plate area (pixels) required to attempt recognition."
},
"enhancement": {
"label": "Enhancement level",
"description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution."
}
},
"motion": {
"label": "Motion detection",
"description": "Default motion detection settings; can be overridden per-camera.",
"enabled": {
"label": "Enable motion detection",
"description": "Enable or disable motion detection globally; per-camera settings can override this."
},
"threshold": {
"label": "Motion threshold",
"description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)."
},
"lightning_threshold": {
"label": "Lightning threshold",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)."
},
"improve_contrast": {
"label": "Improve contrast",
"description": "Apply contrast improvement to frames before motion analysis to help detection."
},
"contour_area": {
"label": "Contour area",
"description": "Minimum contour area in pixels required for a motion contour to be counted."
},
"delta_alpha": {
"label": "Delta alpha",
"description": "Alpha blending factor used in frame differencing for motion calculation."
},
"frame_alpha": {
"label": "Frame alpha",
"description": "Alpha value used when blending frames for motion preprocessing."
},
"frame_height": {
"label": "Frame height",
"description": "Height in pixels to scale frames to when computing motion (useful for performance)."
},
"mask": {
"label": "Mask coordinates",
"description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas."
},
"mqtt_off_delay": {
"label": "MQTT off delay",
"description": "Seconds to wait after last motion before publishing an MQTT 'off' state."
},
"enabled_in_config": {
"label": "Original motion state",
"description": "Indicates whether motion detection was enabled in the original static configuration."
},
"raw_mask": {
"label": "Raw Mask"
}
},
"objects": {
"label": "Objects",
"description": "Object tracking defaults including which labels to track and per-object filters.",
"track": {
"label": "Objects to track",
"description": "List of object labels to track globally; camera configs can override this."
},
"filters": {
"label": "Object filters",
"description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
"min_area": {
"label": "Minimum object area",
"description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum object area",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum aspect ratio",
"description": "Minimum width/height ratio required for the bounding box to qualify."
},
"max_ratio": {
"label": "Maximum aspect ratio",
"description": "Maximum width/height ratio allowed for the bounding box to qualify."
},
"threshold": {
"label": "Avg confidence",
"description": "Average detection confidence threshold required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum confidence",
"description": "Minimum single-frame detection confidence required for the object to be counted."
},
"mask": {
"label": "Filter mask",
"description": "Polygon coordinates defining where this filter applies within the frame."
},
"raw_mask": {
"label": "Raw Mask"
}
},
"mask": {
"label": "Object mask",
"description": "Mask polygon used to prevent object detection in specified areas."
},
"genai": {
"label": "GenAI object config",
"description": "GenAI options for describing tracked objects and sending frames for generation.",
"enabled": {
"label": "Enable GenAI",
"description": "Enable GenAI generation of descriptions for tracked objects by default."
},
"use_snapshot": {
"label": "Use snapshots",
"description": "Use object snapshots instead of thumbnails for GenAI description generation."
},
"prompt": {
"label": "Caption prompt",
"description": "Default prompt template used when generating descriptions with GenAI."
},
"object_prompts": {
"label": "Object prompts",
"description": "Per-object prompts to customize GenAI outputs for specific labels."
},
"objects": {
"label": "GenAI objects",
"description": "List of object labels to send to GenAI by default."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that must be entered for objects to qualify for GenAI description generation."
},
"debug_save_thumbnails": {
"label": "Save thumbnails",
"description": "Save thumbnails sent to GenAI for debugging and review."
},
"send_triggers": {
"label": "GenAI triggers",
"description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).",
"tracked_object_end": {
"label": "Send on end",
"description": "Send a request to GenAI when the tracked object ends."
},
"after_significant_updates": {
"label": "Early GenAI trigger",
"description": "Send a request to GenAI after a specified number of significant updates for the tracked object."
}
},
"enabled_in_config": {
"label": "Original GenAI state",
"description": "Indicates whether GenAI was enabled in the original static config."
}
}
},
"record": {
"label": "Recording",
"description": "Recording and retention settings; can be overridden per-camera.",
"enabled": {
"label": "Enable recording",
"description": "Enable or disable recording globally; individual cameras can override this."
},
"expire_interval": {
"label": "Record cleanup interval",
"description": "Minutes between cleanup passes that remove expired recording segments."
},
"continuous": {
"label": "Continuous retention",
"description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": {
"label": "Retention days",
"description": "Days to retain recordings."
}
},
"motion": {
"label": "Motion retention",
"description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": {
"label": "Retention days",
"description": "Days to retain recordings."
}
},
"detections": {
"label": "Detection retention",
"description": "Recording retention settings for detection events including pre/post capture durations.",
"pre_capture": {
"label": "Pre-capture seconds",
"description": "Number of seconds before the detection event to include in the recording."
},
"post_capture": {
"label": "Post-capture seconds",
"description": "Number of seconds after the detection event to include in the recording."
},
"retain": {
"label": "Event retention",
"description": "Retention settings for recordings of detection events.",
"days": {
"label": "Retention days",
"description": "Number of days to retain recordings of detection events."
},
"mode": {
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
"alerts": {
"label": "Alert retention",
"description": "Recording retention settings for alert events including pre/post capture durations.",
"pre_capture": {
"label": "Pre-capture seconds",
"description": "Number of seconds before the detection event to include in the recording."
},
"post_capture": {
"label": "Post-capture seconds",
"description": "Number of seconds after the detection event to include in the recording."
},
"retain": {
"label": "Event retention",
"description": "Retention settings for recordings of detection events.",
"days": {
"label": "Retention days",
"description": "Number of days to retain recordings of detection events."
},
"mode": {
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
"export": {
"label": "Export config",
"description": "Settings used when exporting recordings such as timelapse and hardware acceleration.",
"hwaccel_args": {
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
}
},
"preview": {
"label": "Preview config",
"description": "Settings controlling the quality of recording previews shown in the UI.",
"quality": {
"label": "Preview quality",
"description": "Preview quality level (very_low, low, medium, high, very_high)."
}
},
"enabled_in_config": {
"label": "Original recording state",
"description": "Indicates whether recording was enabled in the original static configuration."
}
},
"review": {
"label": "Review",
"description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage; can be overridden per-camera.",
"alerts": {
"label": "Alerts config",
"description": "Settings for which tracked objects generate alerts and how alerts are retained.",
"enabled": {
"label": "Enable alerts",
"description": "Enable or disable alert generation for this camera."
},
"labels": {
"label": "Alert labels",
"description": "List of object labels that qualify as alerts (for example: car, person)."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone."
},
"enabled_in_config": {
"label": "Original alerts state",
"description": "Tracks whether alerts were originally enabled in the static configuration."
},
"cutoff_time": {
"label": "Alerts cutoff time",
"description": "Seconds to wait after no alert-causing activity before cutting off an alert."
}
},
"detections": {
"label": "Detections config",
"description": "Settings for creating detection events (non-alert) and how long to keep them.",
"enabled": {
"label": "Enable detections",
"description": "Enable or disable detection events for this camera."
},
"labels": {
"label": "Detection labels",
"description": "List of object labels that qualify as detection events."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone."
},
"cutoff_time": {
"label": "Detections cutoff time",
"description": "Seconds to wait after no detection-causing activity before cutting off a detection."
},
"enabled_in_config": {
"label": "Original detections state",
"description": "Tracks whether detections were originally enabled in the static configuration."
}
},
"genai": {
"label": "GenAI config",
"description": "Controls use of generative AI for producing descriptions and summaries of review items.",
"enabled": {
"label": "Enable GenAI descriptions",
"description": "Enable or disable GenAI-generated descriptions and summaries for review items."
},
"alerts": {
"label": "Enable GenAI for alerts",
"description": "Use GenAI to generate descriptions for alert items."
},
"detections": {
"label": "Enable GenAI for detections",
"description": "Use GenAI to generate descriptions for detection items."
},
"image_source": {
"label": "Review image source",
"description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens."
},
"additional_concerns": {
"label": "Additional concerns",
"description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails",
"description": "Save thumbnails that are sent to the GenAI provider for debugging and review."
},
"enabled_in_config": {
"label": "Original GenAI state",
"description": "Tracks whether GenAI review was originally enabled in the static configuration."
},
"preferred_language": {
"label": "Preferred language",
"description": "Preferred language to request from the GenAI provider for generated responses."
},
"activity_context_prompt": {
"label": "Activity context prompt",
"description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries."
}
}
},
"semantic_search": {
"label": "Semantic Search",
"description": "Settings for semantic search which builds and queries object embeddings to find similar items.",
"triggers": {
"label": "Triggers",
"description": "Actions and matching criteria for camera-specific semantic search triggers.",
"friendly_name": {
"label": "Friendly name",
"description": "Optional friendly name displayed in the UI for this trigger."
},
"enabled": {
"label": "Enable this trigger",
"description": "Enable or disable this semantic search trigger."
},
"type": {
"label": "Trigger type",
"description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)."
},
"data": {
"label": "Trigger content",
"description": "Text phrase or thumbnail ID to match against tracked objects."
},
"threshold": {
"label": "Trigger threshold",
"description": "Minimum similarity score (0-1) required to activate this trigger."
},
"actions": {
"label": "Trigger actions",
"description": "List of actions to execute when trigger matches (notification, sub_label, attribute)."
}
}
},
"snapshots": {
"label": "Snapshots",
"description": "Settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
"enabled": {
"label": "Snapshots enabled",
"description": "Enable or disable saving snapshots globally."
},
"clean_copy": {
"label": "Save clean copy",
"description": "Save an unannotated clean copy of snapshots in addition to annotated ones."
},
"timestamp": {
"label": "Timestamp overlay",
"description": "Overlay a timestamp on saved snapshots."
},
"bounding_box": {
"label": "Bounding box overlay",
"description": "Draw bounding boxes for tracked objects on saved snapshots."
},
"crop": {
"label": "Crop snapshot",
"description": "Crop saved snapshots to the detected object's bounding box."
},
"required_zones": {
"label": "Required zones",
"description": "Zones an object must enter for a snapshot to be saved."
},
"height": {
"label": "Snapshot height",
"description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size."
},
"retain": {
"label": "Snapshot retention",
"description": "Retention settings for saved snapshots including default days and per-object overrides.",
"default": {
"label": "Default retention",
"description": "Default number of days to retain snapshots."
},
"mode": {
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
},
"objects": {
"label": "Object retention",
"description": "Per-object overrides for snapshot retention days."
}
},
"quality": {
"label": "JPEG quality",
"description": "JPEG encode quality for saved snapshots (0-100)."
}
},
"timestamp_style": {
"label": "Timestamp style",
"description": "Styling options for in-feed timestamps applied to recordings and snapshots.",
"position": {
"label": "Timestamp position",
"description": "Position of the timestamp on the image (tl/tr/bl/br)."
},
"format": {
"label": "Timestamp format",
"description": "Datetime format string used for timestamps (Python datetime format codes)."
},
"color": {
"label": "Timestamp color",
"description": "RGB color values for the timestamp text (all values 0-255).",
"red": {
"label": "Red",
"description": "Red component (0-255) for timestamp color."
},
"green": {
"label": "Green",
"description": "Green component (0-255) for timestamp color."
},
"blue": {
"label": "Blue",
"description": "Blue component (0-255) for timestamp color."
}
},
"thickness": {
"label": "Timestamp thickness",
"description": "Line thickness of the timestamp text."
},
"effect": {
"label": "Timestamp effect",
"description": "Visual effect for the timestamp text (none, solid, shadow)."
}
},
"best_image_timeout": {
"label": "Best image timeout",
"description": "How long to wait for the image with the highest confidence score."
},
"mqtt": {
"label": "MQTT",
"description": "MQTT image publishing settings.",
"enabled": {
"label": "Send image",
"description": "Enable publishing image snapshots for objects to MQTT topics for this camera."
},
"timestamp": {
"label": "Add timestamp",
"description": "Overlay a timestamp on images published to MQTT."
},
"bounding_box": {
"label": "Add bounding box",
"description": "Draw bounding boxes on images published over MQTT."
},
"crop": {
"label": "Crop image",
"description": "Crop images published to MQTT to the detected object's bounding box."
},
"height": {
"label": "Image height",
"description": "Height (pixels) to resize images published over MQTT."
},
"required_zones": {
"label": "Required zones",
"description": "Zones that an object must enter for an MQTT image to be published."
},
"quality": {
"label": "JPEG quality",
"description": "JPEG quality for images published to MQTT (0-100)."
}
},
"notifications": {
"label": "Notifications",
"description": "Settings to enable and control notifications; can be overridden per-camera.",
"enabled": {
"label": "Enable notifications",
"description": "Enable or disable notifications globally."
},
"email": {
"label": "Notification email",
"description": "Email address used for push notifications or required by certain notification providers."
},
"cooldown": {
"label": "Cooldown period",
"description": "Cooldown (seconds) between notifications to avoid spamming recipients."
},
"enabled_in_config": {
"label": "Original notifications state",
"description": "Indicates whether notifications were enabled in the original static configuration."
}
},
"onvif": {
"label": "ONVIF",
"description": "ONVIF connection and PTZ autotracking settings for this camera.",
"host": {
"label": "ONVIF host",
"description": "Host (and optional scheme) for the ONVIF service for this camera."
},
"port": {
"label": "ONVIF port",
"description": "Port number for the ONVIF service."
},
"user": {
"label": "ONVIF username",
"description": "Username for ONVIF authentication; some devices require admin user for ONVIF."
},
"password": {
"label": "ONVIF password",
"description": "Password for ONVIF authentication."
},
"tls_insecure": {
"label": "Disable TLS verify",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
},
"autotracking": {
"label": "PTZ config",
"description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
"enabled": {
"label": "Enable Autotracking",
"description": "Enable or disable automatic PTZ camera tracking of detected objects."
},
"calibrate_on_startup": {
"label": "Calibrate on start",
"description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration."
},
"zooming": {
"label": "Zoom mode",
"description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)."
},
"zoom_factor": {
"label": "Zoom factor",
"description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75."
},
"track": {
"label": "Tracked objects",
"description": "List of object types that should trigger autotracking."
},
"required_zones": {
"label": "Required zones",
"description": "Objects must enter one of these zones before autotracking begins."
},
"return_preset": {
"label": "Return preset",
"description": "ONVIF preset name configured in camera firmware to return to after tracking ends."
},
"timeout": {
"label": "Return timeout",
"description": "Wait this many seconds after losing tracking before returning camera to preset position."
},
"movement_weights": {
"label": "Movement weights",
"description": "Calibration values automatically generated by camera calibration. Do not modify manually."
},
"enabled_in_config": {
"label": "Original autotrack state",
"description": "Internal field to track whether autotracking was enabled in configuration."
}
},
"ignore_time_mismatch": {
"label": "Ignore time mismatch",
"description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication."
}
},
"type": {
"label": "Camera type",
    "description": "Type of camera."
},
"ui": {
"label": "Camera UI",
"description": "Display ordering and dashboard visibility for this camera in the UI.",
"order": {
"label": "UI order",
"description": "Numeric order used to sort the camera in the UI; larger numbers appear later."
},
"dashboard": {
"label": "Show in dashboard",
"description": "Toggle whether this camera is visible in the main dashboard."
}
},
"webui_url": {
"label": "Camera URL",
    "description": "URL to access the camera directly from the system page."
},
"zones": {
"label": "Zones",
"description": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
"friendly_name": {
"label": "Zone name",
"description": "A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used."
},
"filters": {
"label": "Zone filters",
"description": "Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.",
"min_area": {
"label": "Minimum object area",
"description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum object area",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum aspect ratio",
"description": "Minimum width/height ratio required for the bounding box to qualify."
},
"max_ratio": {
"label": "Maximum aspect ratio",
"description": "Maximum width/height ratio allowed for the bounding box to qualify."
},
"threshold": {
"label": "Avg confidence",
"description": "Average detection confidence threshold required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum confidence",
"description": "Minimum single-frame detection confidence required for the object to be counted."
},
"mask": {
"label": "Filter mask",
"description": "Polygon coordinates defining where this filter applies within the frame."
},
"raw_mask": {
    "label": "Raw mask"
}
},
"coordinates": {
"label": "Coordinates",
"description": "Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy)."
},
"distances": {
"label": "Real-world distances",
"description": "Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set."
},
"inertia": {
"label": "Inertia frames",
"description": "Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections."
},
"loitering_time": {
"label": "Loitering seconds",
"description": "Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection."
},
"speed_threshold": {
"label": "Minimum speed",
"description": "Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers."
},
"objects": {
"label": "Trigger objects",
"description": "List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered."
}
},
"enabled_in_config": {
"label": "Original camera state",
"description": "Keep track of original state of camera."
}
}

View File

@ -5,11 +5,11 @@
"label": "Bird classification config",
"description": "Settings specific to bird classification models.",
"enabled": {
"label": "Enable bird classification",
"label": "Bird classification",
"description": "Enable or disable bird classification."
},
"threshold": {
"label": "Minimum classification score required to be considered a match",
"label": "Minimum score",
"description": "Minimum classification score required to accept a bird classification."
}
},
@ -17,46 +17,46 @@
"label": "Custom Classification Models",
"description": "Configuration for custom classification models used for objects or state detection.",
"enabled": {
"label": "Enable running the model",
"label": "Enable model",
"description": "Enable or disable the custom classification model."
},
"name": {
"label": "Name of classification model",
"label": "Model name",
"description": "Identifier for the custom classification model to use."
},
"threshold": {
"label": "Classification score threshold to change the state",
"label": "Score threshold",
"description": "Score threshold used to change the classification state."
},
"save_attempts": {
"label": "Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification",
"label": "Saved attempts",
"description": "How many classification attempts to save for recent classifications UI."
},
"object_config": {
"objects": {
"label": "Object types to classify",
"label": "Classify objects",
"description": "List of object types to run object classification on."
},
"classification_type": {
"label": "Type of classification that is applied",
"label": "Classification type",
"description": "Classification type applied: 'sub_label' (adds sub_label) or other supported types."
}
},
"state_config": {
"cameras": {
"label": "Cameras to run classification on",
"label": "Classification cameras",
"description": "Per-camera crop and settings for running state classification.",
"crop": {
"label": "Crop of image frame on this camera to run classification on",
"label": "Classification crop",
"description": "Crop coordinates to use for running classification on this camera."
}
},
"motion": {
"label": "If classification should be run when motion is detected in the crop",
"label": "Run on motion",
"description": "If true, run classification when motion is detected within the specified crop."
},
"interval": {
"label": "Interval to run classification on in seconds",
"label": "Classification interval",
"description": "Interval (seconds) between periodic classification runs for state classification."
}
}

View File

@ -1,6 +1,6 @@
{
"label": "Database",
"description": "Settings for the SQLite database used by Frigate to store events and metadata.",
"description": "Settings for the SQLite database used by Frigate to store tracked object and recording metadata.",
"path": {
"label": "Database path",
"description": "Filesystem path where the Frigate SQLite database file will be stored."

View File

@ -2,59 +2,59 @@
"label": "Object Detection",
"description": "Settings for the detection/detect role used to run object detection and initialize trackers.",
"enabled": {
"label": "Detection Enabled",
"label": "Detection enabled",
"description": "Enable or disable object detection for this camera. Detection must be enabled for object tracking to run."
},
"height": {
"label": "Height of the stream for the detect role",
"label": "Detect height",
"description": "Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"width": {
"label": "Width of the stream for the detect role",
"label": "Detect width",
"description": "Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution."
},
"fps": {
"label": "Number of frames per second to process through detection",
"description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended ~5)."
"label": "Detect FPS",
    "description": "Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5; only set higher, at most 10, if tracking extremely fast-moving objects)."
},
"min_initialized": {
"label": "Minimum number of consecutive hits for an object to be initialized by the tracker",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations."
"label": "Min initialization hits",
"description": "Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2."
},
"max_disappeared": {
"label": "Maximum number of frames the object can disappear before detection ends",
"label": "Max disappeared frames",
"description": "Number of frames without a detection before a tracked object is considered gone."
},
"stationary": {
"label": "Stationary objects config",
"description": "Settings to detect and manage objects that remain stationary for a period of time.",
"interval": {
"label": "Frame interval for checking stationary objects",
"label": "Stationary interval",
"description": "How often (in frames) to run a detection check to confirm a stationary object."
},
"threshold": {
"label": "Number of frames without a position change for an object to be considered stationary",
"label": "Stationary threshold",
"description": "Number of frames with no position change required to mark an object as stationary."
},
"max_frames": {
"label": "Max frames for stationary objects",
"description": "Limits how long stationary objects are tracked before being discarded (override defaults to control retention).",
"label": "Max frames",
"description": "Limits how long stationary objects are tracked before being discarded.",
"default": {
"label": "Default max frames",
"description": "Default maximum frames to track a stationary object before stopping."
},
"objects": {
"label": "Object specific max frames",
"label": "Object max frames",
"description": "Per-object overrides for maximum frames to track stationary objects."
}
},
"classifier": {
    "label": "Enable visual classifier for determining if objects with jittery bounding boxes are stationary",
"label": "Enable visual classifier",
"description": "Use a visual classifier to detect truly stationary objects even when bounding boxes jitter."
}
},
"annotation_offset": {
"label": "Milliseconds to offset detect annotations by",
"label": "Annotation offset",
"description": "Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative."
}
}

View File

@ -1,13 +1,53 @@
{
"label": "Detector hardware",
"description": "Configuration for object detectors (CPU, EdgeTPU, GPU backends) and any detector-specific model settings.",
"description": "Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
"type": {
"label": "Detector Type",
"description": "Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino')."
},
"model": {
"label": "Detector specific model configuration",
"description": "Detector-specific model configuration options (path, input size, etc.)."
"description": "Detector-specific model configuration options (path, input size, etc.).",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."
},
"labelmap_path": {
"label": "Label map for custom object detector",
"description": "Path to a labelmap file that maps numeric classes to string labels for the detector."
},
"width": {
"label": "Object detection model input width",
"description": "Width of the model input tensor in pixels."
},
"height": {
"label": "Object detection model input height",
"description": "Height of the model input tensor in pixels."
},
"labelmap": {
"label": "Labelmap customization",
"description": "Overrides or remapping entries to merge into the standard labelmap."
},
"attributes_map": {
"label": "Map of object labels to their attribute labels",
"description": "Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate'])."
},
"input_tensor": {
"label": "Model Input Tensor Shape",
"description": "Tensor format expected by the model: 'nhwc' or 'nchw'."
},
"input_pixel_format": {
"label": "Model Input Pixel Color Format",
"description": "Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'."
},
"input_dtype": {
    "label": "Model Input Data Type",
"description": "Data type of the model input tensor (for example 'float32')."
},
"model_type": {
"label": "Object Detection Model Type",
"description": "Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization."
}
},
"model_path": {
"label": "Detector specific model path",

View File

@ -1,4 +1,4 @@
{
"label": "Frigate environment variables",
"label": "Environment variables",
"description": "Key/value pairs of environment variables to set for the Frigate process."
}

View File

@ -1,44 +1,44 @@
{
"label": "Face recognition",
"description": "Global settings for face detection and recognition used across cameras unless overridden per-camera.",
"description": "Settings for face detection and recognition; can be overridden per-camera.",
"enabled": {
"label": "Enable face recognition",
"description": "Enable or disable face recognition globally."
},
"model_size": {
"label": "The size of the embeddings model used",
"label": "Model size",
"description": "Model size to use for face embeddings (small/large); larger may require GPU."
},
"unknown_score": {
"label": "Minimum face distance score required to be marked as a potential match",
"description": "Distance threshold below which a face is considered a potential match (lower = stricter)."
"label": "Unknown score threshold",
    "description": "Minimum score a face must reach to be considered a potential match; faces scoring below this are marked unknown (higher = stricter)."
},
"detection_threshold": {
"label": "Minimum face detection score required to be considered a face",
"label": "Detection threshold",
"description": "Minimum detection confidence required to consider a face detection valid."
},
"recognition_threshold": {
"label": "Minimum face distance score required to be considered a match",
"label": "Recognition threshold",
"description": "Face embedding distance threshold to consider two faces a match."
},
"min_area": {
"label": "Min area of face box to consider running face recognition",
"label": "Minimum face area",
"description": "Minimum area (pixels) of a detected face box required to attempt recognition."
},
"min_faces": {
"label": "Min face recognitions for the sub label to be applied to the person object",
"label": "Minimum faces",
"description": "Minimum number of face recognitions required before applying a recognized sub-label to a person."
},
"save_attempts": {
"label": "Number of face attempts to save in the recent recognitions tab",
"label": "Saved attempts",
"description": "Number of face recognition attempts to retain for recent recognition UI."
},
"blur_confidence_filter": {
"label": "Apply blur quality filter to face confidence",
"label": "Blur confidence filter",
"description": "Adjust confidence scores based on image blur to reduce false positives for poor quality faces."
},
"device": {
"label": "The device key to use for face recognition",
"label": "Device",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
}
}

View File

@ -3,66 +3,66 @@
"description": "FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
"path": {
"label": "FFmpeg path",
"description": "Path to the FFmpeg binary to use globally or a version alias."
"description": "Path to the FFmpeg binary to use globally or a version alias (\"5.0\" or \"7.0\")."
},
"global_args": {
"label": "FFmpeg arguments",
"description": "args passed to FFmpeg processes by default."
"label": "FFmpeg global args",
"description": "Global args passed to FFmpeg processes by default."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments",
"description": "hardware acceleration arguments for FFmpeg (auto or provider-specific)."
"label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for FFmpeg (auto or provider-specific)."
},
"input_args": {
"label": "FFmpeg input arguments",
"description": "input arguments applied to FFmpeg input streams by default."
"label": "Input args",
"description": "Input arguments applied to FFmpeg input streams by default."
},
"output_args": {
"label": "FFmpeg output arguments per role",
"label": "Output args",
"description": "Default output args used for different FFmpeg roles such as detect and record.",
"detect": {
"label": "Detect role FFmpeg output arguments",
"label": "Detect output args",
"description": "Default output args for detect role streams."
},
"record": {
"label": "Record role FFmpeg output arguments",
"label": "Record output args",
"description": "Default output args for record role streams."
}
},
"retry_interval": {
"label": "FFmpeg retry time",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10."
},
"apple_compatibility": {
"label": "Apple compatibility",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index",
"description": "Default GPU index used for hardware acceleration if available."
},
"inputs": {
"label": "Camera inputs",
"description": "List of input stream definitions (paths and roles) for this camera.",
"path": {
"label": "Camera input path",
"label": "Input path",
"description": "Camera input stream URL or path."
},
"roles": {
"label": "Roles assigned to this input",
"label": "Input roles",
"description": "Roles for this input stream (for example: detect, record, audio)."
},
"global_args": {
"label": "FFmpeg arguments",
"label": "FFmpeg args",
"description": "FFmpeg arguments for this input stream."
},
"hwaccel_args": {
"label": "FFmpeg hardware acceleration arguments",
"label": "Hardware acceleration args",
"description": "Hardware acceleration arguments for this input stream."
},
"input_args": {
"label": "FFmpeg input arguments",
"label": "Input args",
"description": "Input arguments specific to this stream."
}
},
"retry_interval": {
"label": "Time in seconds to wait before FFmpeg retries connecting to the camera",
"description": "Seconds to wait before attempting to reconnect a camera stream after failure."
},
"apple_compatibility": {
"label": "Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players",
"description": "Enable HEVC tagging for better Apple player compatibility when recording H.265."
},
"gpu": {
"label": "GPU index to use for hardware acceleration",
"description": "Default GPU index used for hardware acceleration if available."
}
}

View File

@ -1,16 +1,16 @@
{
"label": "Live playback",
"description": "Settings used by the Web UI to control live stream selection, resolution and quality.",
"description": "Settings used by the Web UI to control live stream resolution and quality.",
"streams": {
"label": "Friendly names and restream names to use for live view",
"label": "Live stream names",
"description": "Mapping of configured stream names to restream/go2rtc names used for live playback."
},
"height": {
"label": "Live camera view height",
"label": "Live height",
"description": "Height (pixels) to render the live stream in the Web UI; must be <= detect stream height."
},
"quality": {
"label": "Live camera view quality",
"label": "Live quality",
"description": "Encoding quality for the live jsmpeg stream (1 highest, 31 lowest)."
}
}

View File

@ -2,11 +2,11 @@
"label": "Logging",
"description": "Controls default log verbosity and per-component log level overrides.",
"default": {
"label": "Default logging level",
"label": "Logging level",
"description": "Default global log verbosity (debug, info, warning, error)."
},
"logs": {
"label": "Log level for specified processes",
"label": "Per-process log level",
"description": "Per-component log level overrides to increase or decrease verbosity for specific modules."
}
}

View File

@ -2,59 +2,65 @@
"label": "License Plate Recognition",
"description": "License plate recognition settings including detection thresholds, formatting, and known plates.",
"enabled": {
"label": "Enable license plate recognition",
"label": "Enable LPR",
"description": "Enable or disable LPR globally; camera-level settings can override."
},
"model_size": {
"label": "The size of the embeddings model used",
"label": "Model size",
"description": "Model size used for text detection/recognition; small runs on CPU, large on GPU."
},
"detection_threshold": {
"label": "License plate object confidence score required to begin running recognition",
"label": "Detection threshold",
"description": "Detection confidence threshold to begin running OCR on a suspected plate."
},
"min_area": {
"label": "Minimum area of license plate to begin running recognition",
"label": "Minimum plate area",
"description": "Minimum plate area (pixels) required to attempt recognition."
},
"recognition_threshold": {
"label": "Recognition confidence score required to add the plate to the object as a sub label",
"label": "Recognition threshold",
"description": "Confidence threshold required for recognized plate text to be attached as a sub-label."
},
"min_plate_length": {
"label": "Minimum number of characters a license plate must have to be added to the object as a sub label",
"label": "Min plate length",
"description": "Minimum number of characters a recognized plate must contain to be considered valid."
},
"format": {
"label": "Regular expression for the expected format of license plate",
"label": "Plate format regex",
"description": "Optional regex to validate recognized plate strings against an expected format."
},
"match_distance": {
"label": "Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate",
"label": "Match distance",
"description": "Number of character mismatches allowed when comparing detected plates to known plates."
},
"known_plates": {
"label": "Known plates to track (strings or regular expressions)",
"label": "Known plates",
"description": "List of plates or regexes to specially track or alert on."
},
"enhancement": {
"label": "Amount of contrast adjustment and denoising to apply to license plate images before recognition",
"description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results."
"label": "Enhancement level",
    "description": "Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results; levels above 5 may only work with nighttime plates and should be used with caution."
},
"debug_save_plates": {
"label": "Save plates captured for LPR for debugging purposes",
"label": "Save debug plates",
"description": "Save plate crop images for debugging LPR performance."
},
"device": {
"label": "The device key to use for LPR",
"label": "Device",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
},
"expire_time": {
"label": "Expire plates not seen after number of seconds (for dedicated LPR cameras only)",
"description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)."
},
"replace_rules": {
"label": "List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'",
"description": "Regex replacement rules used to normalize detected plate strings before matching."
"label": "Replacement rules",
"description": "Regex replacement rules used to normalize detected plate strings before matching.",
"pattern": {
"label": "Regex pattern"
},
"replacement": {
"label": "Replacement string"
}
},
"expire_time": {
"label": "Expire seconds",
"description": "Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only)."
}
}

View File

@ -1,6 +1,6 @@
{
"label": "Detection model",
"description": "Settings to configure a custom object detection model, its input shape, and labelmap overrides.",
"description": "Settings to configure a custom object detection model and its input shape.",
"path": {
"label": "Custom Object detection model path",
"description": "Path to a custom detection model file (or plus://<model_id> for Frigate+ models)."

View File

@ -6,12 +6,12 @@
"description": "Enable or disable motion detection globally; per-camera settings can override this."
},
"threshold": {
"label": "Motion detection threshold (1-255)",
"description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity."
"label": "Motion threshold",
"description": "Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255)."
},
"lightning_threshold": {
"label": "Lightning detection threshold (0.3-1.0)",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive)."
"label": "Lightning threshold",
"description": "Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0)."
},
"improve_contrast": {
"label": "Improve contrast",
@ -22,27 +22,30 @@
"description": "Minimum contour area in pixels required for a motion contour to be counted."
},
"delta_alpha": {
"label": "Delta Alpha",
"label": "Delta alpha",
"description": "Alpha blending factor used in frame differencing for motion calculation."
},
"frame_alpha": {
"label": "Frame Alpha",
"label": "Frame alpha",
"description": "Alpha value used when blending frames for motion preprocessing."
},
"frame_height": {
"label": "Frame Height",
"label": "Frame height",
"description": "Height in pixels to scale frames to when computing motion (useful for performance)."
},
"mask": {
"label": "Coordinates polygon for the motion mask.",
"label": "Mask coordinates",
"description": "Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas."
},
"mqtt_off_delay": {
"label": "Delay for updating MQTT with no motion detected",
"label": "MQTT off delay",
"description": "Seconds to wait after last motion before publishing an MQTT 'off' state."
},
"enabled_in_config": {
"label": "Keep track of original state of motion detection",
"label": "Original motion state",
"description": "Indicates whether motion detection was enabled in the original static configuration."
},
"raw_mask": {
"label": "Raw Mask"
}
}

View File

@ -1,52 +1,52 @@
{
"label": "MQTT",
"description": "Settings for connecting and publishing telemetry, snapshots, and events to an MQTT broker.",
"description": "Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.",
"enabled": {
"label": "Enable MQTT Communication",
"label": "Enable MQTT",
"description": "Enable or disable MQTT integration for state, events, and snapshots."
},
"host": {
"label": "MQTT Host",
"label": "MQTT host",
"description": "Hostname or IP address of the MQTT broker."
},
"port": {
"label": "MQTT Port",
"label": "MQTT port",
"description": "Port of the MQTT broker (usually 1883 for plain MQTT)."
},
"topic_prefix": {
"label": "MQTT Topic Prefix",
"label": "Topic prefix",
"description": "MQTT topic prefix for all Frigate topics; must be unique if running multiple instances."
},
"client_id": {
"label": "MQTT Client ID",
"label": "Client ID",
"description": "Client identifier used when connecting to the MQTT broker; should be unique per instance."
},
"stats_interval": {
"label": "MQTT Camera Stats Interval",
"label": "Stats interval",
"description": "Interval in seconds for publishing system and camera stats to MQTT."
},
"user": {
"label": "MQTT Username",
"label": "MQTT username",
"description": "Optional MQTT username; can be provided via environment variables or secrets."
},
"password": {
"label": "MQTT Password",
"label": "MQTT password",
"description": "Optional MQTT password; can be provided via environment variables or secrets."
},
"tls_ca_certs": {
"label": "MQTT TLS CA Certificates",
"label": "TLS CA certs",
"description": "Path to CA certificate for TLS connections to the broker (for self-signed certs)."
},
"tls_client_cert": {
"label": "MQTT TLS Client Certificate",
"label": "Client cert",
"description": "Client certificate path for TLS mutual authentication; do not set user/password when using client certs."
},
"tls_client_key": {
"label": "MQTT TLS Client Key",
"label": "Client key",
"description": "Private key path for the client certificate."
},
"tls_insecure": {
"label": "MQTT TLS Insecure",
"label": "TLS insecure",
"description": "Allow insecure TLS connections by skipping hostname verification (not recommended)."
},
"qos": {

View File

@ -2,10 +2,10 @@
"label": "Networking",
"description": "Network-related settings such as IPv6 enablement for Frigate endpoints.",
"ipv6": {
"label": "IPv6 IPv6 settings",
"label": "IPv6 settings",
"description": "IPv6-specific settings for Frigate network services.",
"enabled": {
"label": "Enable IPv6 for port 5000 and/or 8971",
"label": "Enable IPv6",
"description": "Enable IPv6 support for Frigate services (API and UI) where applicable."
},
"listen": {

View File

@ -6,15 +6,15 @@
"description": "Enable or disable notifications globally."
},
"email": {
"label": "Email required for push",
"label": "Notification email",
"description": "Email address used for push notifications or required by certain notification providers."
},
"cooldown": {
"label": "Cooldown period for notifications (time in seconds)",
"label": "Cooldown period",
"description": "Cooldown (seconds) between notifications to avoid spamming recipients."
},
"enabled_in_config": {
"label": "Keep track of original state of notifications",
"label": "Original notifications state",
"description": "Indicates whether notifications were enabled in the original static configuration."
}
}

View File

@ -9,32 +9,35 @@
"label": "Object filters",
"description": "Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
"min_area": {
"label": "Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Minimum bounding box area (pixels or percentage) required for this object type."
"label": "Minimum object area",
"description": "Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"max_area": {
"label": "Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type."
"label": "Maximum object area",
"description": "Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99)."
},
"min_ratio": {
"label": "Minimum ratio of bounding box's width/height for object to be counted",
"label": "Minimum aspect ratio",
"description": "Minimum width/height ratio required for the bounding box to qualify."
},
"max_ratio": {
"label": "Maximum ratio of bounding box's width/height for object to be counted",
"label": "Maximum aspect ratio",
"description": "Maximum width/height ratio allowed for the bounding box to qualify."
},
"threshold": {
"label": "Average detection confidence threshold for object to be counted",
"label": "Avg confidence",
"description": "Average detection confidence threshold required for the object to be considered a true positive."
},
"min_score": {
"label": "Minimum detection confidence for object to be counted",
"label": "Minimum confidence",
"description": "Minimum single-frame detection confidence required for the object to be counted."
},
"mask": {
"label": "Detection area polygon mask for this filter configuration",
"label": "Filter mask",
"description": "Polygon coordinates defining where this filter applies within the frame."
},
"raw_mask": {
"label": "Raw Mask"
}
},
"mask": {
@ -42,50 +45,50 @@
"description": "Mask polygon used to prevent object detection in specified areas."
},
"genai": {
"label": "Config for using genai to analyze objects",
"label": "GenAI object config",
"description": "GenAI options for describing tracked objects and sending frames for generation.",
"enabled": {
"label": "Enable GenAI for camera",
"label": "Enable GenAI",
"description": "Enable GenAI generation of descriptions for tracked objects by default."
},
"use_snapshot": {
"label": "Use snapshots for generating descriptions",
"label": "Use snapshots",
"description": "Use object snapshots instead of thumbnails for GenAI description generation."
},
"prompt": {
"label": "Default caption prompt",
"label": "Caption prompt",
"description": "Default prompt template used when generating descriptions with GenAI."
},
"object_prompts": {
"label": "Object specific prompts",
"label": "Object prompts",
"description": "Per-object prompts to customize GenAI outputs for specific labels."
},
"objects": {
"label": "List of objects to run generative AI for",
"label": "GenAI objects",
"description": "List of object labels to send to GenAI by default."
},
"required_zones": {
"label": "List of required zones to be entered in order to run generative AI",
"label": "Required zones",
"description": "Zones that must be entered for objects to qualify for GenAI description generation."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes",
"label": "Save thumbnails",
"description": "Save thumbnails sent to GenAI for debugging and review."
},
"send_triggers": {
"label": "What triggers to use to send frames to generative AI for a tracked object",
"label": "GenAI triggers",
"description": "Defines when frames should be sent to GenAI (on end, after updates, etc.).",
"tracked_object_end": {
"label": "Send once the object is no longer tracked",
"label": "Send on end",
"description": "Send a request to GenAI when the tracked object ends."
},
"after_significant_updates": {
"label": "Send an early request to generative AI when X frames accumulated",
"label": "Early GenAI trigger",
"description": "Send a request to GenAI after a specified number of significant updates for the tracked object."
}
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI",
"label": "Original GenAI state",
"description": "Indicates whether GenAI was enabled in the original static config."
}
}

View File

@ -2,71 +2,71 @@
"label": "ONVIF",
"description": "ONVIF connection and PTZ autotracking settings for this camera.",
"host": {
"label": "Onvif Host",
"label": "ONVIF host",
"description": "Host (and optional scheme) for the ONVIF service for this camera."
},
"port": {
"label": "Onvif Port",
"label": "ONVIF port",
"description": "Port number for the ONVIF service."
},
"user": {
"label": "Onvif Username",
"label": "ONVIF username",
"description": "Username for ONVIF authentication; some devices require admin user for ONVIF."
},
"password": {
"label": "Onvif Password",
"label": "ONVIF password",
"description": "Password for ONVIF authentication."
},
"tls_insecure": {
"label": "Onvif Disable TLS verification",
"label": "Disable TLS verify",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
},
"autotracking": {
"label": "PTZ auto tracking config",
"label": "PTZ config",
"description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
"enabled": {
"label": "Enable PTZ object autotracking",
"label": "Enable autotracking",
"description": "Enable or disable automatic PTZ camera tracking of detected objects."
},
"calibrate_on_startup": {
"label": "Perform a camera calibration when Frigate starts",
"label": "Calibrate on start",
"description": "Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration."
},
"zooming": {
"label": "Autotracker zooming mode",
"label": "Zoom mode",
"description": "Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom)."
},
"zoom_factor": {
"label": "Zooming factor (0.1-0.75)",
"description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking."
"label": "Zoom factor",
"description": "Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75."
},
"track": {
"label": "Objects to track",
"description": "List of object types from labelmap.txt that should trigger autotracking."
"label": "Tracked objects",
"description": "List of object types that should trigger autotracking."
},
"required_zones": {
"label": "List of required zones to be entered in order to begin autotracking",
"label": "Required zones",
"description": "Objects must enter one of these zones before autotracking begins."
},
"return_preset": {
"label": "Name of camera preset to return to when object tracking is over",
"label": "Return preset",
"description": "ONVIF preset name configured in camera firmware to return to after tracking ends."
},
"timeout": {
"label": "Seconds to delay before returning to preset",
"label": "Return timeout",
"description": "Wait this many seconds after losing tracking before returning camera to preset position."
},
"movement_weights": {
"label": "Internal value used for PTZ movements based on the speed of your camera's motor",
"label": "Movement weights",
"description": "Calibration values automatically generated by camera calibration. Do not modify manually."
},
"enabled_in_config": {
"label": "Keep track of original state of autotracking",
"label": "Original autotrack state",
"description": "Internal field to track whether autotracking was enabled in configuration."
}
},
"ignore_time_mismatch": {
"label": "Onvif Ignore Time Synchronization Mismatch Between Camera and Server",
"label": "Ignore time mismatch",
"description": "Ignore time synchronization differences between camera and Frigate server for ONVIF communication."
}
}

View File

@ -2,35 +2,35 @@
"label": "Proxy",
"description": "Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
"header_map": {
"label": "Header mapping definitions for proxy user passing",
"label": "Header mapping",
"description": "Map incoming proxy headers to Frigate user and role fields for proxy-based auth.",
"user": {
"label": "Header name from upstream proxy to identify user",
"label": "User header",
"description": "Header containing the authenticated username provided by the upstream proxy."
},
"role": {
"label": "Header name from upstream proxy to identify user role",
"label": "Role header",
"description": "Header containing the authenticated user's role or groups from the upstream proxy."
},
"role_map": {
"label": "Mapping of Frigate roles to upstream group values. ",
"label": "Role mapping",
"description": "Map upstream group values to Frigate roles (for example map admin groups to the admin role)."
}
},
"logout_url": {
"label": "Redirect url for logging out with proxy",
"label": "Logout URL",
"description": "URL to redirect users to when logging out via the proxy."
},
"auth_secret": {
"label": "Secret value for proxy authentication",
"label": "Proxy secret",
"description": "Optional secret checked against the X-Proxy-Secret header to verify trusted proxies."
},
"default_role": {
"label": "Default role for proxy users",
"label": "Default role",
"description": "Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer)."
},
"separator": {
"label": "The character used to separate values in a mapped header",
"description": "Character used to split multiple values provided in proxy headers (for example a comma)."
"label": "Separator character",
"description": "Character used to split multiple values provided in proxy headers."
}
}

View File

@ -2,99 +2,95 @@
"label": "Recording",
"description": "Recording and retention settings applied to cameras unless overridden per-camera.",
"enabled": {
"label": "Enable record on all cameras",
"label": "Enable recording",
"description": "Enable or disable recording globally; individual cameras can override this."
},
"expire_interval": {
"label": "Number of minutes to wait between cleanup runs",
"label": "Record cleanup interval",
"description": "Minutes between cleanup passes that remove expired recording segments."
},
"continuous": {
"label": "Continuous recording retention settings",
"label": "Continuous retention",
"description": "Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": {
"label": "Default retention period",
"description": "Days to retain continuous (always-on) recordings."
"label": "Retention days",
"description": "Days to retain recordings."
}
},
"motion": {
"label": "Motion recording retention settings",
"label": "Motion retention",
"description": "Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
"days": {
"label": "Default retention period",
"description": "Days to retain recordings triggered by motion."
"label": "Retention days",
"description": "Days to retain recordings."
}
},
"detections": {
"label": "Detection specific retention settings",
"label": "Detection retention",
"description": "Recording retention settings for detection events including pre/post capture durations.",
"pre_capture": {
"label": "Seconds to retain before event starts",
"label": "Pre-capture seconds",
"description": "Number of seconds before the detection event to include in the recording."
},
"post_capture": {
"label": "Seconds to retain after event ends",
"label": "Post-capture seconds",
"description": "Number of seconds after the detection event to include in the recording."
},
"retain": {
"label": "Event retention settings",
"label": "Event retention",
"description": "Retention settings for recordings of detection events.",
"days": {
"label": "Default retention period",
"label": "Retention days",
"description": "Number of days to retain recordings of detection events."
},
"mode": {
"label": "Retain mode",
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
"alerts": {
"label": "Alert specific retention settings",
"label": "Alert retention",
"description": "Recording retention settings for alert events including pre/post capture durations.",
"pre_capture": {
"label": "Seconds to retain before event starts",
"description": "Number of seconds before the alert event to include in the recording."
"label": "Pre-capture seconds",
"description": "Number of seconds before the alert event to include in the recording."
},
"post_capture": {
"label": "Seconds to retain after event ends",
"description": "Number of seconds after the alert event to include in the recording."
"label": "Post-capture seconds",
"description": "Number of seconds after the alert event to include in the recording."
},
"retain": {
"label": "Event retention settings",
"description": "Retention settings for recordings of alert events.",
"label": "Event retention",
"description": "Retention settings for recordings of alert events.",
"days": {
"label": "Default retention period",
"description": "Number of days to retain recordings of alert events."
"label": "Retention days",
"description": "Number of days to retain recordings of alert events."
},
"mode": {
"label": "Retain mode",
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
}
}
},
"export": {
"label": "Recording Export Config",
"label": "Export config",
"description": "Settings used when exporting recordings such as timelapse and hardware acceleration.",
"timelapse_args": {
"label": "Timelapse output arguments",
"description": "FFmpeg arguments for timelapse exports. Default args fit 24 hours of recording into 1 hour playback (-vf setpts=0.04*PTS -r 30)."
},
"hwaccel_args": {
"label": "Export-specific FFmpeg hardware acceleration arguments",
"label": "Export hwaccel args",
"description": "Hardware acceleration args to use for export/transcode operations."
}
},
"preview": {
"label": "Recording Preview Config",
"label": "Preview config",
"description": "Settings controlling the quality of recording previews shown in the UI.",
"quality": {
"label": "Quality of recording preview",
"label": "Preview quality",
"description": "Preview quality level (very_low, low, medium, high, very_high)."
}
},
"enabled_in_config": {
"label": "Keep track of original state of recording",
"label": "Original recording state",
"description": "Indicates whether recording was enabled in the original static configuration."
}
}

View File

@ -2,58 +2,58 @@
"label": "Review",
"description": "Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
"alerts": {
"label": "Review alerts config",
"label": "Alerts config",
"description": "Settings for which tracked objects generate alerts and how alerts are retained.",
"enabled": {
"label": "Enable alerts",
"description": "Enable or disable alert generation for this camera."
},
"labels": {
"label": "Labels to create alerts for",
"label": "Alert labels",
"description": "List of object labels that qualify as alerts (for example: car, person)."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as an alert",
"label": "Required zones",
"description": "Zones that an object must enter to be considered an alert; leave empty to allow any zone."
},
"enabled_in_config": {
"label": "Keep track of original state of alerts",
"label": "Original alerts state",
"description": "Tracks whether alerts were originally enabled in the static configuration."
},
"cutoff_time": {
"label": "Time to cutoff alerts after no alert-causing activity has occurred",
"label": "Alerts cutoff time",
"description": "Seconds to wait after no alert-causing activity before cutting off an alert."
}
},
"detections": {
"label": "Review detections config",
"label": "Detections config",
"description": "Settings for creating detection events (non-alert) and how long to keep them.",
"enabled": {
"label": "Enable detections",
"description": "Enable or disable detection events for this camera."
},
"labels": {
"label": "Labels to create detections for",
"label": "Detection labels",
"description": "List of object labels that qualify as detection events."
},
"required_zones": {
"label": "List of required zones to be entered in order to save the event as a detection",
"label": "Required zones",
"description": "Zones that an object must enter to be considered a detection; leave empty to allow any zone."
},
"cutoff_time": {
"label": "Time to cutoff detection after no detection-causing activity has occurred",
"label": "Detections cutoff time",
"description": "Seconds to wait after no detection-causing activity before cutting off a detection."
},
"enabled_in_config": {
"label": "Keep track of original state of detections",
"label": "Original detections state",
"description": "Tracks whether detections were originally enabled in the static configuration."
}
},
"genai": {
"label": "Review description genai config",
"label": "GenAI config",
"description": "Controls use of generative AI for producing descriptions and summaries of review items.",
"enabled": {
"label": "Enable GenAI descriptions for review items",
"label": "Enable GenAI descriptions",
"description": "Enable or disable GenAI-generated descriptions and summaries for review items."
},
"alerts": {
@ -65,27 +65,27 @@
"description": "Use GenAI to generate descriptions for detection items."
},
"image_source": {
"label": "Image source for review descriptions",
"label": "Review image source",
"description": "Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens."
},
"additional_concerns": {
"label": "Additional concerns that GenAI should make note of on this camera",
"label": "Additional concerns",
"description": "A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera."
},
"debug_save_thumbnails": {
"label": "Save thumbnails sent to generative AI for debugging purposes",
"label": "Save thumbnails",
"description": "Save thumbnails that are sent to the GenAI provider for debugging and review."
},
"enabled_in_config": {
"label": "Keep track of original state of generative AI",
"label": "Original GenAI state",
"description": "Tracks whether GenAI review was originally enabled in the static configuration."
},
"preferred_language": {
"label": "Preferred language for GenAI Response",
"label": "Preferred language",
"description": "Preferred language to request from the GenAI provider for generated responses."
},
"activity_context_prompt": {
"label": "Custom activity context prompt defining normal and suspicious activity patterns for this property",
"label": "Activity context prompt",
"description": "Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries."
}
}

View File

@ -1,4 +1,4 @@
{
"label": "If Frigate should be started in safe mode",
"label": "Safe mode",
"description": "When enabled, start Frigate in safe mode with reduced features for troubleshooting."
}

View File

@ -1,46 +1,52 @@
{
"label": "Semantic Search",
"description": "Settings for semantic search which builds and queries object embeddings to find similar items.",
"description": "Settings for Semantic Search which builds and queries object embeddings to find similar items.",
"enabled": {
"label": "Enable semantic search",
"description": "Enable or disable the semantic search feature."
},
"reindex": {
"label": "Reindex all tracked objects on startup",
"label": "Reindex on startup",
"description": "Trigger a full reindex of historical tracked objects into the embeddings database."
},
"model": {
"label": "The CLIP model to use for semantic search",
"label": "Semantic search model",
"description": "The embeddings model to use for semantic search (for example 'jinav1')."
},
"model_size": {
"label": "The size of the embeddings model used",
"label": "Model size",
"description": "Select model size; 'small' runs on CPU and 'large' typically requires GPU."
},
"device": {
"label": "The device key to use for semantic search",
"label": "Device",
"description": "This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information"
},
"triggers": {
"label": "Trigger actions on tracked objects that match existing thumbnails or descriptions",
"label": "Triggers",
"description": "Actions and matching criteria for camera-specific semantic search triggers.",
"friendly_name": {
"label": "Trigger friendly name used in the Frigate UI"
"label": "Friendly name",
"description": "Optional friendly name displayed in the UI for this trigger."
},
"enabled": {
"label": "Enable this trigger"
"label": "Enable this trigger",
"description": "Enable or disable this semantic search trigger."
},
"type": {
"label": "Type of trigger"
"label": "Trigger type",
"description": "Type of trigger: 'thumbnail' (match against image) or 'description' (match against text)."
},
"data": {
"label": "Trigger content (text phrase or image ID)"
"label": "Trigger content",
"description": "Text phrase or thumbnail ID to match against tracked objects."
},
"threshold": {
"label": "Confidence score required to run the trigger"
"label": "Trigger threshold",
"description": "Minimum similarity score (0-1) required to activate this trigger."
},
"actions": {
"label": "Actions to perform when trigger is matched"
"label": "Trigger actions",
"description": "List of actions to execute when trigger matches (notification, sub_label, attribute)."
}
}
}

View File

@ -6,47 +6,47 @@
"description": "Enable or disable saving snapshots globally."
},
"clean_copy": {
"label": "Create a clean copy of the snapshot image",
"label": "Save clean copy",
"description": "Save an unannotated clean copy of snapshots in addition to annotated ones."
},
"timestamp": {
"label": "Add a timestamp overlay on the snapshot",
"label": "Timestamp overlay",
"description": "Overlay a timestamp on saved snapshots."
},
"bounding_box": {
"label": "Add a bounding box overlay on the snapshot",
"label": "Bounding box overlay",
"description": "Draw bounding boxes for tracked objects on saved snapshots."
},
"crop": {
"label": "Crop the snapshot to the detected object",
"label": "Crop snapshot",
"description": "Crop saved snapshots to the detected object's bounding box."
},
"required_zones": {
"label": "List of required zones to be entered in order to save a snapshot",
"label": "Required zones",
"description": "Zones an object must enter for a snapshot to be saved."
},
"height": {
"label": "Snapshot image height",
"label": "Snapshot height",
"description": "Height (pixels) to resize saved snapshots to; leave empty to preserve original size."
},
"retain": {
"label": "Snapshot retention",
"description": "Retention settings for saved snapshots including default days and per-object overrides.",
"default": {
"label": "Default retention period",
"label": "Default retention",
"description": "Default number of days to retain snapshots."
},
"mode": {
"label": "Retain mode",
"label": "Retention mode",
"description": "Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects)."
},
"objects": {
"label": "Object retention period",
"label": "Object retention",
"description": "Per-object overrides for snapshot retention days."
}
},
"quality": {
"label": "Quality of the encoded jpeg (0-100)",
"label": "JPEG quality",
"description": "JPEG encode quality for saved snapshots (0-100)."
}
}

View File

@ -2,31 +2,31 @@
"label": "Telemetry",
"description": "System telemetry and stats options including GPU and network bandwidth monitoring.",
"network_interfaces": {
"label": "Enabled network interfaces for bandwidth calculation",
"label": "Network interfaces",
"description": "List of network interface name prefixes to monitor for bandwidth statistics."
},
"stats": {
"label": "System Stats",
"label": "System stats",
"description": "Options to enable/disable collection of various system and GPU statistics.",
"amd_gpu_stats": {
"label": "Enable AMD GPU stats",
"label": "AMD GPU stats",
"description": "Enable collection of AMD GPU statistics if an AMD GPU is present."
},
"intel_gpu_stats": {
"label": "Enable Intel GPU stats",
"label": "Intel GPU stats",
"description": "Enable collection of Intel GPU statistics if an Intel GPU is present."
},
"network_bandwidth": {
"label": "Enable network bandwidth for ffmpeg processes",
"label": "Network bandwidth",
"description": "Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities)."
},
"intel_gpu_device": {
"label": "Define the device to use when gathering SR-IOV stats",
"label": "SR-IOV device",
"description": "Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats."
}
},
"version_check": {
"label": "Enable latest version check",
"label": "Version check",
"description": "Enable an outbound check to detect if a newer Frigate version is available."
}
}

View File

@ -2,7 +2,7 @@
"label": "TLS",
"description": "TLS settings for Frigate's web endpoints (port 8971).",
"enabled": {
"label": "Enable TLS for port 8971",
"label": "Enable TLS",
"description": "Enable TLS for Frigate's web UI and API on the configured TLS port."
}
}

View File

@ -2,23 +2,23 @@
"label": "UI",
"description": "User interface preferences such as timezone, time/date formatting, and units.",
"timezone": {
"label": "Override UI timezone",
"label": "Timezone",
"description": "Optional timezone to display across the UI (defaults to browser local time if unset)."
},
"time_format": {
"label": "Override UI time format",
"label": "Time format",
"description": "Time format to use in the UI (browser, 12hour, or 24hour)."
},
"date_style": {
"label": "Override UI dateStyle",
"label": "Date style",
"description": "Date style to use in the UI (full, long, medium, short)."
},
"time_style": {
"label": "Override UI timeStyle",
"label": "Time style",
"description": "Time style to use in the UI (full, long, medium, short)."
},
"unit_system": {
"label": "The unit system to use for measurements",
"label": "Unit system",
"description": "Unit system for display (metric or imperial) used in the UI and MQTT."
}
}