add descriptions to all pydantic fields

This commit is contained in:
Josh Hawkins 2026-01-29 08:50:15 -06:00
parent aad24497bf
commit e128aaaa61
30 changed files with 1238 additions and 367 deletions

View File

@@ -8,35 +8,59 @@ __all__ = ["AuthConfig"]
class AuthConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable authentication")
enabled: bool = Field(
default=True,
title="Enable authentication",
description="Enable native authentication for the Frigate UI.",
)
reset_admin_password: bool = Field(
default=False, title="Reset the admin password on startup"
default=False,
title="Reset the admin password on startup",
description="If true, reset the admin user's password on startup and print the new password in logs.",
)
cookie_name: str = Field(
default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$"
default="frigate_token",
title="Name for jwt token cookie",
description="Name of the cookie used to store the JWT token for native authentication.",
pattern=r"^[a-z_]+$",
)
cookie_secure: bool = Field(
default=False,
title="Set secure flag on cookie",
description="Set the secure flag on the auth cookie; should be true when using TLS.",
)
cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
session_length: int = Field(
default=86400, title="Session length for jwt session tokens", ge=60
default=86400,
title="Session length for jwt session tokens",
description="Session duration in seconds for JWT-based sessions.",
ge=60,
)
refresh_time: int = Field(
default=1800,
title="Refresh the session if it is going to expire in this many seconds",
description="When a session is within this many seconds of expiring, refresh it back to full length.",
ge=30,
)
failed_login_rate_limit: Optional[str] = Field(
default=None,
title="Rate limits for failed login attempts.",
title="Rate limits for failed login attempts",
description="Rate limiting rules for failed login attempts to reduce brute-force attacks.",
)
trusted_proxies: list[str] = Field(
default=[],
title="Trusted proxies for determining IP address to rate limit",
description="List of trusted proxy IPs used when determining client IP for rate limiting.",
)
# As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256
hash_iterations: int = Field(default=600000, title="Password hash iterations")
hash_iterations: int = Field(
default=600000,
title="Password hash iterations",
description="Number of PBKDF2-SHA256 iterations to use when hashing user passwords.",
)
roles: Dict[str, List[str]] = Field(
default_factory=dict,
title="Role to camera mappings. Empty list grants access to all cameras.",
title="Role to camera mappings. Empty list grants access to all cameras",
description="Map roles to camera lists. An empty list grants access to all cameras for the role.",
)
admin_first_time_login: Optional[bool] = Field(
default=False,

View File

@@ -22,20 +22,39 @@ class AudioFilterConfig(FrigateBaseModel):
class AudioConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable audio events.")
enabled: bool = Field(
default=False,
title="Enable audio events",
description="Enable or disable audio event detection; can be overridden per-camera.",
)
max_not_heard: int = Field(
default=30, title="Seconds of not hearing the type of audio to end the event."
default=30,
title="Seconds of not hearing the type of audio to end the event",
description="Amount of seconds without the configured audio type before the audio event is ended.",
)
min_volume: int = Field(
default=500, title="Min volume required to run audio detection."
default=500,
title="Min volume required to run audio detection",
description="Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low).",
)
listen: list[str] = Field(
default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
default=DEFAULT_LISTEN_AUDIO,
title="Audio to listen for",
description="List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell).",
)
filters: Optional[dict[str, AudioFilterConfig]] = Field(
None, title="Audio filters."
None,
title="Audio filters",
description="Per-audio-type filter settings such as confidence thresholds used to reduce false positives.",
)
enabled_in_config: Optional[bool] = Field(
None, title="Keep track of original state of audio detection."
None,
title="Keep track of original state of audio detection",
description="Indicates whether audio detection was originally enabled in the static config file.",
)
num_threads: int = Field(
default=2,
title="Number of detection threads",
description="Number of threads to use for audio detection processing.",
ge=1,
)
num_threads: int = Field(default=2, title="Number of detection threads", ge=1)

View File

@@ -29,45 +29,88 @@ class BirdseyeModeEnum(str, Enum):
class BirdseyeLayoutConfig(FrigateBaseModel):
scaling_factor: float = Field(
default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0
default=2.0,
title="Birdseye scaling factor",
description="Scaling factor used by the layout calculator (range 1.0 to 5.0).",
ge=1.0,
le=5.0,
)
max_cameras: Optional[int] = Field(
default=None,
title="Max cameras",
description="Maximum number of cameras to display at once in Birdseye; shows the most recent cameras.",
)
max_cameras: Optional[int] = Field(default=None, title="Max cameras")
class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view.")
enabled: bool = Field(
default=True,
title="Enable birdseye view",
description="Enable or disable the Birdseye view feature.",
)
mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, title="Tracking mode."
default=BirdseyeModeEnum.objects,
title="Tracking mode",
description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.",
)
restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
width: int = Field(default=1280, title="Birdseye width.")
height: int = Field(default=720, title="Birdseye height.")
restream: bool = Field(
default=False,
title="Restream birdseye via RTSP",
description="Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously.",
)
width: int = Field(
default=1280,
title="Birdseye width",
description="Output width (pixels) of the composed Birdseye frame.",
)
height: int = Field(
default=720,
title="Birdseye height",
description="Output height (pixels) of the composed Birdseye frame.",
)
quality: int = Field(
default=8,
title="Encoding quality.",
title="Encoding quality",
description="Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest).",
ge=1,
le=31,
)
inactivity_threshold: int = Field(
default=30, title="Birdseye Inactivity Threshold", gt=0
default=30,
title="Birdseye Inactivity Threshold",
description="Seconds of inactivity after which a camera will stop being shown in Birdseye.",
gt=0,
)
layout: BirdseyeLayoutConfig = Field(
default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config"
default_factory=BirdseyeLayoutConfig,
title="Birdseye Layout",
description="Layout options for the Birdseye composition.",
)
idle_heartbeat_fps: float = Field(
default=0.0,
ge=0.0,
le=10.0,
title="Idle heartbeat FPS (0 disables, max 10)",
description="Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable.",
)
# uses BaseModel because some global attributes are not available at the camera level
class BirdseyeCameraConfig(BaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view for camera.")
enabled: bool = Field(
default=True,
title="Enable birdseye view",
description="Enable or disable the Birdseye view feature.",
)
mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, title="Tracking mode for camera."
default=BirdseyeModeEnum.objects,
title="Tracking mode",
description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.",
)
order: int = Field(default=0, title="Position of the camera in the birdseye view.")
order: int = Field(
default=0,
title="Position of the camera in the birdseye view",
description="Numeric position controlling the camera's ordering in the Birdseye layout.",
)

View File

@@ -50,10 +50,17 @@ class CameraTypeEnum(str, Enum):
class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME)
name: Optional[str] = Field(
None,
title="Camera Name",
description="Camera name is required",
pattern=REGEX_CAMERA_NAME,
)
friendly_name: Optional[str] = Field(
None, title="Camera friendly name used in the Frigate UI."
None,
title="Camera friendly name used in the Frigate UI",
description="Camera friendly name used in the Frigate UI",
)
@model_validator(mode="before")
@@ -63,80 +70,129 @@ class CameraConfig(FrigateBaseModel):
pass
return values
enabled: bool = Field(default=True, title="Enable camera.")
enabled: bool = Field(default=True, title="Enabled", description="Enabled")
# Options with global fallback
audio: AudioConfig = Field(
default_factory=AudioConfig, title="Audio events configuration."
default_factory=AudioConfig,
title="Audio events",
description="Settings for audio-based event detection; can be overridden per-camera.",
)
audio_transcription: CameraAudioTranscriptionConfig = Field(
default_factory=CameraAudioTranscriptionConfig,
title="Audio transcription config.",
title="Audio transcription",
description="Settings for live and speech audio transcription used for events and live captions.",
)
birdseye: BirdseyeCameraConfig = Field(
default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration."
default_factory=BirdseyeCameraConfig,
title="Birdseye",
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
)
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Object detection configuration."
default_factory=DetectConfig,
title="Object Detection",
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
)
face_recognition: CameraFaceRecognitionConfig = Field(
default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
default_factory=CameraFaceRecognitionConfig,
title="Face recognition",
description="Settings for face detection and recognition; can be overridden per-camera.",
)
ffmpeg: CameraFfmpegConfig = Field(
title="FFmpeg",
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
)
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
default_factory=CameraLiveConfig,
title="Live playback",
description="Settings used by the Web UI to control live stream selection, resolution and quality.",
)
lpr: CameraLicensePlateRecognitionConfig = Field(
default_factory=CameraLicensePlateRecognitionConfig, title="LPR config."
default_factory=CameraLicensePlateRecognitionConfig,
title="License Plate Recognition",
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
)
motion: MotionConfig = Field(
None,
title="Motion detection",
description="Default motion detection settings; can be overridden per-camera.",
)
motion: MotionConfig = Field(None, title="Motion detection configuration.")
objects: ObjectConfig = Field(
default_factory=ObjectConfig, title="Object configuration."
default_factory=ObjectConfig,
title="Objects",
description="Object tracking defaults including which labels to track and per-object filters.",
)
record: RecordConfig = Field(
default_factory=RecordConfig, title="Record configuration."
default_factory=RecordConfig,
title="Recording",
description="Recording and retention settings; can be overridden per-camera.",
)
review: ReviewConfig = Field(
default_factory=ReviewConfig, title="Review configuration."
default_factory=ReviewConfig,
title="Review",
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage; can be overridden per-camera.",
)
semantic_search: CameraSemanticSearchConfig = Field(
default_factory=CameraSemanticSearchConfig,
title="Semantic search configuration.",
title="Semantic Search",
description="Settings for semantic search which builds and queries object embeddings to find similar items.",
)
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Snapshot configuration."
default_factory=SnapshotsConfig,
title="Snapshots",
description="Settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
)
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig, title="Timestamp style configuration."
default_factory=TimestampStyleConfig,
title="Timestamp style",
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
)
# Options without global fallback
best_image_timeout: int = Field(
default=60,
title="How long to wait for the image with the highest confidence score.",
description="How long to wait for the image with the highest confidence score.",
)
mqtt: CameraMqttConfig = Field(
default_factory=CameraMqttConfig, title="MQTT configuration."
default_factory=CameraMqttConfig,
title="MQTT",
description="MQTT image publishing settings.",
)
notifications: NotificationConfig = Field(
default_factory=NotificationConfig, title="Notifications configuration."
default_factory=NotificationConfig,
title="Notifications",
description="Settings to enable and control notifications; can be overridden per-camera.",
)
onvif: OnvifConfig = Field(
default_factory=OnvifConfig, title="Camera Onvif Configuration."
default_factory=OnvifConfig,
title="ONVIF",
description="ONVIF connection and PTZ autotracking settings for this camera.",
)
type: CameraTypeEnum = Field(
default=CameraTypeEnum.generic,
title="Camera Type",
description="Camera Type",
)
type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type")
ui: CameraUiConfig = Field(
default_factory=CameraUiConfig, title="Camera UI Modifications."
default_factory=CameraUiConfig,
title="Camera UI",
description="Display ordering and dashboard visibility for this camera in the UI.",
)
webui_url: Optional[str] = Field(
None,
title="URL to visit the camera directly from system page",
description="URL to visit the camera directly from system page",
)
zones: dict[str, ZoneConfig] = Field(
default_factory=dict, title="Zone configuration."
default_factory=dict,
title="Zones",
description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of camera."
default=None,
title="Keep track of original state of camera.",
description="Keep track of original state of camera.",
)
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()

View File

@@ -8,56 +8,82 @@ __all__ = ["DetectConfig", "StationaryConfig", "StationaryMaxFramesConfig"]
class StationaryMaxFramesConfig(FrigateBaseModel):
default: Optional[int] = Field(default=None, title="Default max frames.", ge=1)
default: Optional[int] = Field(
default=None,
title="Default max frames",
description="Default maximum frames to track a stationary object before stopping.",
ge=1,
)
objects: dict[str, int] = Field(
default_factory=dict, title="Object specific max frames."
default_factory=dict,
title="Object specific max frames",
description="Per-object overrides for maximum frames to track stationary objects.",
)
class StationaryConfig(FrigateBaseModel):
interval: Optional[int] = Field(
default=None,
title="Frame interval for checking stationary objects.",
title="Frame interval for checking stationary objects",
description="How often (in frames) to run a detection check to confirm a stationary object.",
gt=0,
)
threshold: Optional[int] = Field(
default=None,
title="Number of frames without a position change for an object to be considered stationary",
description="Number of frames with no position change required to mark an object as stationary.",
ge=1,
)
max_frames: StationaryMaxFramesConfig = Field(
default_factory=StationaryMaxFramesConfig,
title="Max frames for stationary objects.",
title="Max frames for stationary objects",
description="Limits how long stationary objects are tracked before being discarded.",
)
classifier: bool = Field(
default=True,
title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.",
title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary",
description="Use a visual classifier to detect truly stationary objects even when bounding boxes jitter.",
)
class DetectConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Detection Enabled.")
enabled: bool = Field(
default=False,
title="Detection Enabled",
description="Enable or disable object detection for this camera. Detection must be enabled for object tracking to run.",
)
height: Optional[int] = Field(
default=None, title="Height of the stream for the detect role."
default=None,
title="Height of the stream for the detect role",
description="Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.",
)
width: Optional[int] = Field(
default=None, title="Width of the stream for the detect role."
default=None,
title="Width of the stream for the detect role",
description="Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.",
)
fps: int = Field(
default=5, title="Number of frames per second to process through detection."
default=5,
title="Number of frames per second to process through detection",
description="Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects).",
)
min_initialized: Optional[int] = Field(
default=None,
title="Minimum number of consecutive hits for an object to be initialized by the tracker.",
title="Minimum number of consecutive hits for an object to be initialized by the tracker",
description="Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2.",
)
max_disappeared: Optional[int] = Field(
default=None,
title="Maximum number of frames the object can disappear before detection ends.",
title="Maximum number of frames the object can disappear before detection ends",
description="Number of frames without a detection before a tracked object is considered gone.",
)
stationary: StationaryConfig = Field(
default_factory=StationaryConfig,
title="Stationary objects config.",
title="Stationary objects config",
description="Settings to detect and manage objects that remain stationary for a period of time.",
)
annotation_offset: int = Field(
default=0, title="Milliseconds to offset detect annotations by."
default=0,
title="Milliseconds to offset detect annotations by",
description="Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative.",
)

View File

@@ -35,39 +35,58 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
class FfmpegOutputArgsConfig(FrigateBaseModel):
detect: Union[str, list[str]] = Field(
default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Detect role FFmpeg output arguments.",
title="Detect role FFmpeg output arguments",
description="Default output args for detect role streams.",
)
record: Union[str, list[str]] = Field(
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.",
title="Record role FFmpeg output arguments",
description="Default output args for record role streams.",
)
class FfmpegConfig(FrigateBaseModel):
path: str = Field(default="default", title="FFmpeg path")
path: str = Field(
default="default",
title="FFmpeg path",
description='Path to the FFmpeg binary to use globally or a version alias ("5.0" or "7.0").',
)
global_args: Union[str, list[str]] = Field(
default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
default=FFMPEG_GLOBAL_ARGS_DEFAULT,
title="FFmpeg arguments",
description="Global args passed to FFmpeg processes by default.",
)
hwaccel_args: Union[str, list[str]] = Field(
default="auto", title="FFmpeg hardware acceleration arguments."
default="auto",
title="FFmpeg hardware acceleration arguments",
description="Hardware acceleration arguments for FFmpeg (auto or provider-specific).",
)
input_args: Union[str, list[str]] = Field(
default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
default=FFMPEG_INPUT_ARGS_DEFAULT,
title="FFmpeg input arguments",
description="Input arguments applied to FFmpeg input streams by default.",
)
output_args: FfmpegOutputArgsConfig = Field(
default_factory=FfmpegOutputArgsConfig,
title="FFmpeg output arguments per role.",
title="FFmpeg output arguments per role",
description="Default output args used for different FFmpeg roles such as detect and record.",
)
retry_interval: float = Field(
default=10.0,
title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
title="Time in seconds to wait before FFmpeg retries connecting to the camera",
description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
gt=0.0,
)
apple_compatibility: bool = Field(
default=False,
title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players",
description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
)
gpu: int = Field(
default=0,
title="GPU index to use for hardware acceleration",
description="Default GPU index used for hardware acceleration if available.",
)
gpu: int = Field(default=0, title="GPU index to use for hardware acceleration.")
@property
def ffmpeg_path(self) -> str:
@@ -95,21 +114,36 @@ class CameraRoleEnum(str, Enum):
class CameraInput(FrigateBaseModel):
path: EnvString = Field(title="Camera input path.")
roles: list[CameraRoleEnum] = Field(title="Roles assigned to this input.")
path: EnvString = Field(
title="Camera input path",
description="Camera input stream URL or path.",
)
roles: list[CameraRoleEnum] = Field(
title="Roles assigned to this input",
description="Roles for this input stream (for example: detect, record, audio).",
)
global_args: Union[str, list[str]] = Field(
default_factory=list, title="FFmpeg global arguments."
default_factory=list,
title="FFmpeg arguments",
description="FFmpeg arguments for this input stream.",
)
hwaccel_args: Union[str, list[str]] = Field(
default_factory=list, title="FFmpeg hardware acceleration arguments."
default_factory=list,
title="FFmpeg hardware acceleration arguments",
description="Hardware acceleration arguments for this input stream.",
)
input_args: Union[str, list[str]] = Field(
default_factory=list, title="FFmpeg input arguments."
default_factory=list,
title="FFmpeg input arguments",
description="Input arguments specific to this stream.",
)
class CameraFfmpegConfig(FfmpegConfig):
inputs: list[CameraInput] = Field(title="Camera inputs.")
inputs: list[CameraInput] = Field(
title="Camera inputs",
description="List of input stream definitions (paths and roles) for this camera.",
)
@field_validator("inputs")
@classmethod

View File

@@ -10,7 +10,18 @@ __all__ = ["CameraLiveConfig"]
class CameraLiveConfig(FrigateBaseModel):
streams: Dict[str, str] = Field(
default_factory=list,
title="Friendly names and restream names to use for live view.",
title="Friendly names and restream names to use for live view",
description="Mapping of configured stream names to restream/go2rtc names used for live playback.",
)
height: int = Field(
default=720,
title="Live camera view height",
description="Height (pixels) to render the live stream in the Web UI; must be <= detect stream height.",
)
quality: int = Field(
default=8,
ge=1,
le=31,
title="Live camera view quality",
description="Encoding quality for the live jsmpeg stream (1 highest, 31 lowest).",
)
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")

View File

@@ -8,30 +8,64 @@ __all__ = ["MotionConfig"]
class MotionConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable motion on all cameras.")
enabled: bool = Field(
default=True,
title="Enable motion detection",
description="Enable or disable motion detection globally; per-camera settings can override this.",
)
threshold: int = Field(
default=30,
title="Motion detection threshold (1-255).",
title="Motion detection threshold (1-255)",
description="Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255).",
ge=1,
le=255,
)
lightning_threshold: float = Field(
default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
default=0.8,
title="Lightning detection threshold (0.3-1.0)",
description="Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0).",
ge=0.3,
le=1.0,
)
improve_contrast: bool = Field(
default=True,
title="Improve contrast",
description="Apply contrast improvement to frames before motion analysis to help detection.",
)
contour_area: Optional[int] = Field(
default=10,
title="Contour area",
description="Minimum contour area in pixels required for a motion contour to be counted.",
)
delta_alpha: float = Field(
default=0.2,
title="Delta Alpha",
description="Alpha blending factor used in frame differencing for motion calculation.",
)
frame_alpha: float = Field(
default=0.01,
title="Frame Alpha",
description="Alpha value used when blending frames for motion preprocessing.",
)
frame_height: Optional[int] = Field(
default=100,
title="Frame Height",
description="Height in pixels to scale frames to when computing motion (useful for performance).",
)
improve_contrast: bool = Field(default=True, title="Improve Contrast")
contour_area: Optional[int] = Field(default=10, title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha")
frame_alpha: float = Field(default=0.01, title="Frame Alpha")
frame_height: Optional[int] = Field(default=100, title="Frame Height")
mask: Union[str, list[str]] = Field(
default="", title="Coordinates polygon for the motion mask."
default="",
title="Coordinates polygon for the motion mask.",
description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.",
)
mqtt_off_delay: int = Field(
default=30,
title="Delay for updating MQTT with no motion detected.",
title="Delay for updating MQTT with no motion detected",
description="Seconds to wait after last motion before publishing an MQTT 'off' state.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of motion detection."
default=None,
title="Keep track of original state of motion detection",
description="Indicates whether motion detection was enabled in the original static configuration.",
)
raw_mask: Union[str, list[str]] = ""

View File

@@ -6,18 +6,40 @@ __all__ = ["CameraMqttConfig"]
class CameraMqttConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Send image over MQTT.")
timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
height: int = Field(default=270, title="MQTT image height.")
enabled: bool = Field(
default=True,
title="Send image over MQTT",
description="Enable publishing image snapshots for objects to MQTT topics for this camera.",
)
timestamp: bool = Field(
default=True,
title="Add timestamp to MQTT image",
description="Overlay a timestamp on images published to MQTT.",
)
bounding_box: bool = Field(
default=True,
title="Add bounding box to MQTT image",
description="Draw bounding boxes on images published over MQTT.",
)
crop: bool = Field(
default=True,
title="Crop MQTT image to detected object",
description="Crop images published to MQTT to the detected object's bounding box.",
)
height: int = Field(
default=270,
title="MQTT image height",
description="Height (pixels) to resize images published over MQTT.",
)
required_zones: list[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to send the image.",
title="List of required zones to be entered in order to send the image",
description="Zones that an object must enter for an MQTT image to be published.",
)
quality: int = Field(
default=70,
title="Quality of the encoded jpeg (0-100).",
title="Quality of the encoded jpeg (0-100)",
description="JPEG quality for images published to MQTT (0-100).",
ge=0,
le=100,
)

View File

@@ -8,11 +8,24 @@ __all__ = ["NotificationConfig"]
class NotificationConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable notifications")
email: Optional[str] = Field(default=None, title="Email required for push.")
enabled: bool = Field(
default=False,
title="Enable notifications",
description="Enable or disable notifications globally.",
)
email: Optional[str] = Field(
default=None,
title="Email required for push",
description="Email address used for push notifications or required by certain notification providers.",
)
cooldown: int = Field(
default=0, ge=0, title="Cooldown period for notifications (time in seconds)."
default=0,
ge=0,
title="Cooldown period for notifications (time in seconds)",
description="Cooldown (seconds) between notifications to avoid spamming recipients.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of notifications."
default=None,
title="Keep track of original state of notifications",
description="Indicates whether notifications were enabled in the original static configuration.",
)

View File

@@ -13,30 +13,38 @@ DEFAULT_TRACKED_OBJECTS = ["person"]
class FilterConfig(FrigateBaseModel):
min_area: Union[int, float] = Field(
default=0,
title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
description="Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
)
max_area: Union[int, float] = Field(
default=24000000,
title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99)",
description="Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
)
min_ratio: float = Field(
default=0,
title="Minimum ratio of bounding box's width/height for object to be counted.",
title="Minimum ratio of bounding box's width/height for object to be counted",
description="Minimum width/height ratio required for the bounding box to qualify.",
)
max_ratio: float = Field(
default=24000000,
title="Maximum ratio of bounding box's width/height for object to be counted.",
title="Maximum ratio of bounding box's width/height for object to be counted",
description="Maximum width/height ratio allowed for the bounding box to qualify.",
)
threshold: float = Field(
default=0.7,
title="Average detection confidence threshold for object to be counted.",
title="Average detection confidence threshold for object to be counted",
description="Average detection confidence threshold required for the object to be considered a true positive.",
)
min_score: float = Field(
default=0.5, title="Minimum detection confidence for object to be counted."
default=0.5,
title="Minimum detection confidence for object to be counted",
description="Minimum single-frame detection confidence required for the object to be counted.",
)
mask: Optional[Union[str, list[str]]] = Field(
default=None,
title="Detection area polygon mask for this filter configuration.",
title="Detection area polygon mask for this filter configuration",
description="Polygon coordinates defining where this filter applies within the frame.",
)
raw_mask: Union[str, list[str]] = ""
@ -51,46 +59,64 @@ class FilterConfig(FrigateBaseModel):
class GenAIObjectTriggerConfig(FrigateBaseModel):
tracked_object_end: bool = Field(
default=True, title="Send once the object is no longer tracked."
default=True,
title="Send once the object is no longer tracked",
description="Send a request to GenAI when the tracked object ends.",
)
after_significant_updates: Optional[int] = Field(
default=None,
title="Send an early request to generative AI when X frames accumulated.",
title="Send an early request to generative AI when X frames accumulated",
description="Send a request to GenAI after a specified number of significant updates for the tracked object.",
ge=1,
)
class GenAIObjectConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable GenAI for camera.")
enabled: bool = Field(
default=False,
title="Enable GenAI for camera",
description="Enable GenAI generation of descriptions for tracked objects by default.",
)
use_snapshot: bool = Field(
default=False, title="Use snapshots for generating descriptions."
default=False,
title="Use snapshots for generating descriptions",
description="Use object snapshots instead of thumbnails for GenAI description generation.",
)
prompt: str = Field(
default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
title="Default caption prompt.",
title="Default caption prompt",
description="Default prompt template used when generating descriptions with GenAI.",
)
object_prompts: dict[str, str] = Field(
default_factory=dict, title="Object specific prompts."
default_factory=dict,
title="Object specific prompts",
description="Per-object prompts to customize GenAI outputs for specific labels.",
)
objects: Union[str, list[str]] = Field(
default_factory=list,
title="List of objects to run generative AI for.",
title="List of objects to run generative AI for",
description="List of object labels to send to GenAI by default.",
)
required_zones: Union[str, list[str]] = Field(
default_factory=list,
title="List of required zones to be entered in order to run generative AI.",
title="List of required zones to be entered in order to run generative AI",
description="Zones that must be entered for objects to qualify for GenAI description generation.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails sent to generative AI for debugging purposes.",
title="Save thumbnails sent to generative AI for debugging purposes",
description="Save thumbnails sent to GenAI for debugging and review.",
)
send_triggers: GenAIObjectTriggerConfig = Field(
default_factory=GenAIObjectTriggerConfig,
title="What triggers to use to send frames to generative AI for a tracked object.",
title="What triggers to use to send frames to generative AI for a tracked object",
description="Defines when frames should be sent to GenAI (on end, after updates, etc.).",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of generative AI."
default=None,
title="Keep track of original state of generative AI",
description="Indicates whether GenAI was enabled in the original static config.",
)
@field_validator("required_zones", mode="before")
@ -103,14 +129,25 @@ class GenAIObjectConfig(FrigateBaseModel):
class ObjectConfig(FrigateBaseModel):
track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
filters: dict[str, FilterConfig] = Field(
default_factory=dict, title="Object filters."
track: list[str] = Field(
default=DEFAULT_TRACKED_OBJECTS,
title="Objects to track",
description="List of object labels to track globally; camera configs can override this.",
)
filters: dict[str, FilterConfig] = Field(
default_factory=dict,
title="Object filters",
description="Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
)
mask: Union[str, list[str]] = Field(
default="",
title="Object mask",
description="Mask polygon used to prevent object detection in specified areas.",
)
mask: Union[str, list[str]] = Field(default="", title="Object mask.")
genai: GenAIObjectConfig = Field(
default_factory=GenAIObjectConfig,
title="Config for using genai to analyze objects.",
title="Config for using genai to analyze objects",
description="GenAI options for describing tracked objects and sending frames for generation.",
)
_all_objects: list[str] = PrivateAttr()

View File

@ -17,37 +17,57 @@ class ZoomingModeEnum(str, Enum):
class PtzAutotrackConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
enabled: bool = Field(
default=False,
title="Enable PTZ object autotracking",
description="Enable or disable automatic PTZ camera tracking of detected objects.",
)
calibrate_on_startup: bool = Field(
default=False, title="Perform a camera calibration when Frigate starts."
default=False,
title="Perform a camera calibration when Frigate starts",
description="Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration.",
)
zooming: ZoomingModeEnum = Field(
default=ZoomingModeEnum.disabled, title="Autotracker zooming mode."
default=ZoomingModeEnum.disabled,
title="Autotracker zooming mode",
description="Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom).",
)
zoom_factor: float = Field(
default=0.3,
title="Zooming factor (0.1-0.75).",
title="Zooming factor (0.1-0.75)",
description="Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75.",
ge=0.1,
le=0.75,
)
track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
track: list[str] = Field(
default=DEFAULT_TRACKED_OBJECTS,
title="Objects to track",
description="List of object types from labelmap.txt that should trigger autotracking.",
)
required_zones: list[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to begin autotracking.",
title="List of required zones to be entered in order to begin autotracking",
description="Objects must enter one of these zones before autotracking begins.",
)
return_preset: str = Field(
default="home",
title="Name of camera preset to return to when object tracking is over.",
title="Name of camera preset to return to when object tracking is over",
description="ONVIF preset name configured in camera firmware to return to after tracking ends.",
)
timeout: int = Field(
default=10, title="Seconds to delay before returning to preset."
default=10,
title="Seconds to delay before returning to preset",
description="Wait this many seconds after losing tracking before returning camera to preset position.",
)
movement_weights: Optional[Union[str, list[str]]] = Field(
default_factory=list,
title="Internal value used for PTZ movements based on the speed of your camera's motor.",
title="Internal value used for PTZ movements based on the speed of your camera's motor",
description="Calibration values automatically generated by camera calibration. Do not modify manually.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of autotracking."
default=None,
title="Keep track of original state of autotracking",
description="Internal field to track whether autotracking was enabled in configuration.",
)
@field_validator("movement_weights", mode="before")
@ -72,16 +92,38 @@ class PtzAutotrackConfig(FrigateBaseModel):
class OnvifConfig(FrigateBaseModel):
host: str = Field(default="", title="Onvif Host")
port: int = Field(default=8000, title="Onvif Port")
user: Optional[EnvString] = Field(default=None, title="Onvif Username")
password: Optional[EnvString] = Field(default=None, title="Onvif Password")
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
host: str = Field(
default="",
title="Onvif Host",
description="Host (and optional scheme) for the ONVIF service for this camera.",
)
port: int = Field(
default=8000,
title="Onvif Port",
description="Port number for the ONVIF service.",
)
user: Optional[EnvString] = Field(
default=None,
title="Onvif Username",
description="Username for ONVIF authentication; some devices require admin user for ONVIF.",
)
password: Optional[EnvString] = Field(
default=None,
title="Onvif Password",
description="Password for ONVIF authentication.",
)
tls_insecure: bool = Field(
default=False,
title="Onvif Disable TLS verification",
description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).",
)
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.",
title="PTZ auto tracking config",
description="Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
)
ignore_time_mismatch: bool = Field(
default=False,
title="Onvif Ignore Time Synchronization Mismatch Between Camera and Server",
description="Ignore time synchronization differences between camera and Frigate server for ONVIF communication.",
)

View File

@ -21,7 +21,12 @@ __all__ = [
class RecordRetainConfig(FrigateBaseModel):
days: float = Field(default=0, ge=0, title="Default retention period.")
days: float = Field(
default=0,
ge=0,
title="Default retention period",
description="Days to retain recordings.",
)
class RetainModeEnum(str, Enum):
@ -31,22 +36,37 @@ class RetainModeEnum(str, Enum):
class ReviewRetainConfig(FrigateBaseModel):
days: float = Field(default=10, ge=0, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
days: float = Field(
default=10,
ge=0,
title="Default retention period",
description="Number of days to retain recordings of detection events.",
)
mode: RetainModeEnum = Field(
default=RetainModeEnum.motion,
title="Retain mode",
description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).",
)
class EventsConfig(FrigateBaseModel):
pre_capture: int = Field(
default=5,
title="Seconds to retain before event starts.",
title="Seconds to retain before event starts",
description="Number of seconds before the detection event to include in the recording.",
le=MAX_PRE_CAPTURE,
ge=0,
)
post_capture: int = Field(
default=5, ge=0, title="Seconds to retain after event ends."
default=5,
ge=0,
title="Seconds to retain after event ends",
description="Number of seconds after the detection event to include in the recording.",
)
retain: ReviewRetainConfig = Field(
default_factory=ReviewRetainConfig, title="Event retention settings."
default_factory=ReviewRetainConfig,
title="Event retention settings",
description="Retention settings for recordings of detection events.",
)
@ -60,43 +80,65 @@ class RecordQualityEnum(str, Enum):
class RecordPreviewConfig(FrigateBaseModel):
quality: RecordQualityEnum = Field(
default=RecordQualityEnum.medium, title="Quality of recording preview."
default=RecordQualityEnum.medium,
title="Quality of recording preview",
description="Preview quality level (very_low, low, medium, high, very_high).",
)
class RecordExportConfig(FrigateBaseModel):
hwaccel_args: Union[str, list[str]] = Field(
default="auto", title="Export-specific FFmpeg hardware acceleration arguments."
default="auto",
title="Export-specific FFmpeg hardware acceleration arguments",
description="Hardware acceleration args to use for export/transcode operations.",
)
class RecordConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
enabled: bool = Field(
default=False,
title="Enable record on all cameras",
description="Enable or disable recording globally; individual cameras can override this.",
)
expire_interval: int = Field(
default=60,
title="Number of minutes to wait between cleanup runs.",
title="Number of minutes to wait between cleanup runs",
description="Minutes between cleanup passes that remove expired recording segments.",
)
continuous: RecordRetainConfig = Field(
default_factory=RecordRetainConfig,
title="Continuous recording retention settings.",
title="Continuous recording retention settings",
description="Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
)
motion: RecordRetainConfig = Field(
default_factory=RecordRetainConfig, title="Motion recording retention settings."
default_factory=RecordRetainConfig,
title="Motion recording retention settings",
description="Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
)
detections: EventsConfig = Field(
default_factory=EventsConfig, title="Detection specific retention settings."
default_factory=EventsConfig,
title="Detection specific retention settings",
description="Recording retention settings for detection events including pre/post capture durations.",
)
alerts: EventsConfig = Field(
default_factory=EventsConfig, title="Alert specific retention settings."
default_factory=EventsConfig,
title="Alert specific retention settings",
description="Recording retention settings for alert events including pre/post capture durations.",
)
export: RecordExportConfig = Field(
default_factory=RecordExportConfig, title="Recording Export Config"
default_factory=RecordExportConfig,
title="Recording Export Config",
description="Settings used when exporting recordings such as timelapse and hardware acceleration.",
)
preview: RecordPreviewConfig = Field(
default_factory=RecordPreviewConfig, title="Recording Preview Config"
default_factory=RecordPreviewConfig,
title="Recording Preview Config",
description="Settings controlling the quality of recording previews shown in the UI.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of recording."
default=None,
title="Keep track of original state of recording",
description="Indicates whether recording was enabled in the original static configuration.",
)
@property

View File

@ -21,22 +21,32 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"]
class AlertsConfig(FrigateBaseModel):
"""Configure alerts"""
enabled: bool = Field(default=True, title="Enable alerts.")
enabled: bool = Field(
default=True,
title="Enable alerts",
description="Enable or disable alert generation for this camera.",
)
labels: list[str] = Field(
default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for."
default=DEFAULT_ALERT_OBJECTS,
title="Labels to create alerts for",
description="List of object labels that qualify as alerts (for example: car, person).",
)
required_zones: Union[str, list[str]] = Field(
default_factory=list,
title="List of required zones to be entered in order to save the event as an alert.",
title="List of required zones to be entered in order to save the event as an alert",
description="Zones that an object must enter to be considered an alert; leave empty to allow any zone.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of alerts."
default=None,
title="Keep track of original state of alerts",
description="Tracks whether alerts were originally enabled in the static configuration.",
)
cutoff_time: int = Field(
default=40,
title="Time to cutoff alerts after no alert-causing activity has occurred.",
title="Time to cutoff alerts after no alert-causing activity has occurred",
description="Seconds to wait after no alert-causing activity before cutting off an alert.",
)
@field_validator("required_zones", mode="before")
@ -51,22 +61,32 @@ class AlertsConfig(FrigateBaseModel):
class DetectionsConfig(FrigateBaseModel):
"""Configure detections"""
enabled: bool = Field(default=True, title="Enable detections.")
enabled: bool = Field(
default=True,
title="Enable detections",
description="Enable or disable detection events for this camera.",
)
labels: Optional[list[str]] = Field(
default=None, title="Labels to create detections for."
default=None,
title="Labels to create detections for",
description="List of object labels that qualify as detection events.",
)
required_zones: Union[str, list[str]] = Field(
default_factory=list,
title="List of required zones to be entered in order to save the event as a detection.",
title="List of required zones to be entered in order to save the event as a detection",
description="Zones that an object must enter to be considered a detection; leave empty to allow any zone.",
)
cutoff_time: int = Field(
default=30,
title="Time to cutoff detection after no detection-causing activity has occurred.",
title="Time to cutoff detection after no detection-causing activity has occurred",
description="Seconds to wait after no detection-causing activity before cutting off a detection.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of detections."
default=None,
title="Keep track of original state of detections",
description="Tracks whether detections were originally enabled in the static configuration.",
)
@field_validator("required_zones", mode="before")
@ -81,27 +101,42 @@ class DetectionsConfig(FrigateBaseModel):
class GenAIReviewConfig(FrigateBaseModel):
enabled: bool = Field(
default=False,
title="Enable GenAI descriptions for review items.",
title="Enable GenAI descriptions for review items",
description="Enable or disable GenAI-generated descriptions and summaries for review items.",
)
alerts: bool = Field(
default=True,
title="Enable GenAI for alerts",
description="Use GenAI to generate descriptions for alert items.",
)
detections: bool = Field(
default=False,
title="Enable GenAI for detections",
description="Use GenAI to generate descriptions for detection items.",
)
alerts: bool = Field(default=True, title="Enable GenAI for alerts.")
detections: bool = Field(default=False, title="Enable GenAI for detections.")
image_source: ImageSourceEnum = Field(
default=ImageSourceEnum.preview,
title="Image source for review descriptions.",
title="Image source for review descriptions",
description="Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens.",
)
additional_concerns: list[str] = Field(
default=[],
title="Additional concerns that GenAI should make note of on this camera.",
title="Additional concerns that GenAI should make note of on this camera",
description="A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera.",
)
debug_save_thumbnails: bool = Field(
default=False,
title="Save thumbnails sent to generative AI for debugging purposes.",
title="Save thumbnails sent to generative AI for debugging purposes",
description="Save thumbnails that are sent to the GenAI provider for debugging and review.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of generative AI."
default=None,
title="Keep track of original state of generative AI",
description="Tracks whether GenAI review was originally enabled in the static configuration.",
)
preferred_language: str | None = Field(
title="Preferred language for GenAI Response",
description="Preferred language to request from the GenAI provider for generated responses.",
default=None,
)
activity_context_prompt: str = Field(
@ -139,19 +174,24 @@ Evaluate in this order:
3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)
The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""",
title="Custom activity context prompt defining normal and suspicious activity patterns for this property.",
title="Custom activity context prompt defining normal and suspicious activity patterns for this property",
description="Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries.",
)
class ReviewConfig(FrigateBaseModel):
"""Configure reviews"""
alerts: AlertsConfig = Field(
default_factory=AlertsConfig, title="Review alerts config."
default_factory=AlertsConfig,
title="Review alerts config",
description="Settings for which tracked objects generate alerts and how alerts are retained.",
)
detections: DetectionsConfig = Field(
default_factory=DetectionsConfig, title="Review detections config."
default_factory=DetectionsConfig,
title="Review detections config",
description="Settings for creating detection events (non-alert) and how long to keep them.",
)
genai: GenAIReviewConfig = Field(
default_factory=GenAIReviewConfig, title="Review description genai config."
default_factory=GenAIReviewConfig,
title="Review description genai config",
description="Controls use of generative AI for producing descriptions and summaries of review items.",
)

View File

@ -9,36 +9,68 @@ __all__ = ["SnapshotsConfig", "RetainConfig"]
class RetainConfig(FrigateBaseModel):
default: float = Field(default=10, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
default: float = Field(
default=10,
title="Default retention period",
description="Default number of days to retain snapshots.",
)
mode: RetainModeEnum = Field(
default=RetainModeEnum.motion,
title="Retain mode",
description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).",
)
objects: dict[str, float] = Field(
default_factory=dict, title="Object retention period."
default_factory=dict,
title="Object retention period",
description="Per-object overrides for snapshot retention days.",
)
class SnapshotsConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Snapshots enabled.")
enabled: bool = Field(
default=False,
title="Snapshots enabled",
description="Enable or disable saving snapshots globally.",
)
clean_copy: bool = Field(
default=True, title="Create a clean copy of the snapshot image."
default=True,
title="Create a clean copy of the snapshot image",
description="Save an unannotated clean copy of snapshots in addition to annotated ones.",
)
timestamp: bool = Field(
default=False, title="Add a timestamp overlay on the snapshot."
default=False,
title="Add a timestamp overlay on the snapshot",
description="Overlay a timestamp on saved snapshots.",
)
bounding_box: bool = Field(
default=True, title="Add a bounding box overlay on the snapshot."
default=True,
title="Add a bounding box overlay on the snapshot",
description="Draw bounding boxes for tracked objects on saved snapshots.",
)
crop: bool = Field(
default=False,
title="Crop the snapshot to the detected object",
description="Crop saved snapshots to the detected object's bounding box.",
)
crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
required_zones: list[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save a snapshot.",
title="List of required zones to be entered in order to save a snapshot",
description="Zones an object must enter for a snapshot to be saved.",
)
height: Optional[int] = Field(
default=None,
title="Snapshot image height",
description="Height (pixels) to resize saved snapshots to; leave empty to preserve original size.",
)
height: Optional[int] = Field(default=None, title="Snapshot image height.")
retain: RetainConfig = Field(
default_factory=RetainConfig, title="Snapshot retention."
default_factory=RetainConfig,
title="Snapshot retention",
description="Retention settings for saved snapshots including default days and per-object overrides.",
)
quality: int = Field(
default=70,
title="Quality of the encoded jpeg (0-100).",
title="Quality of the encoded jpeg (0-100)",
description="JPEG encode quality for saved snapshots (0-100).",
ge=0,
le=100,
)

View File

@ -27,9 +27,27 @@ class TimestampPositionEnum(str, Enum):
class ColorConfig(FrigateBaseModel):
red: int = Field(default=255, ge=0, le=255, title="Red")
green: int = Field(default=255, ge=0, le=255, title="Green")
blue: int = Field(default=255, ge=0, le=255, title="Blue")
red: int = Field(
default=255,
ge=0,
le=255,
title="Red",
description="Red component (0-255) for timestamp color.",
)
green: int = Field(
default=255,
ge=0,
le=255,
title="Green",
description="Green component (0-255) for timestamp color.",
)
blue: int = Field(
default=255,
ge=0,
le=255,
title="Blue",
description="Blue component (0-255) for timestamp color.",
)
class TimestampEffectEnum(str, Enum):
@ -39,11 +57,27 @@ class TimestampEffectEnum(str, Enum):
class TimestampStyleConfig(FrigateBaseModel):
position: TimestampPositionEnum = Field(
default=TimestampPositionEnum.tl, title="Timestamp position."
default=TimestampPositionEnum.tl,
title="Timestamp position",
description="Position of the timestamp on the image (tl/tr/bl/br).",
)
format: str = Field(
default=DEFAULT_TIME_FORMAT,
title="Timestamp format",
description="Datetime format string used for timestamps (Python datetime format codes).",
)
color: ColorConfig = Field(
default_factory=ColorConfig,
title="Timestamp color",
description="RGB color values for the timestamp text (all values 0-255).",
)
thickness: int = Field(
default=2,
title="Timestamp thickness",
description="Line thickness of the timestamp text.",
)
format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
thickness: int = Field(default=2, title="Timestamp thickness.")
effect: Optional[TimestampEffectEnum] = Field(
default=None, title="Timestamp effect."
default=None,
title="Timestamp effect",
description="Visual effect for the timestamp text (none, solid, shadow).",
)

View File

@ -6,7 +6,18 @@ __all__ = ["CameraUiConfig"]
class CameraUiConfig(FrigateBaseModel):
order: int = Field(default=0, title="Order of camera in UI.")
dashboard: bool = Field(
default=True, title="Show this camera in Frigate dashboard UI."
"""Camera UI
Display ordering and dashboard visibility for this camera in the UI.
"""
order: int = Field(
default=0,
title="Order of camera in UI",
description="Numeric order used to sort the camera in the UI; larger numbers appear later.",
)
dashboard: bool = Field(
default=True,
title="Show this camera in Frigate dashboard UI",
description="Toggle whether this camera is visible in the main dashboard.",
)

View File

@ -14,36 +14,46 @@ logger = logging.getLogger(__name__)
class ZoneConfig(BaseModel):
friendly_name: Optional[str] = Field(
None, title="Zone friendly name used in the Frigate UI."
None,
title="Zone friendly name used in the Frigate UI.",
description="A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used.",
)
filters: dict[str, FilterConfig] = Field(
default_factory=dict, title="Zone filters."
default_factory=dict,
title="Zone filters.",
description="Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.",
)
coordinates: Union[str, list[str]] = Field(
title="Coordinates polygon for the defined zone."
title="Coordinates polygon for the defined zone.",
description="Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy).",
)
distances: Optional[Union[str, list[str]]] = Field(
default_factory=list,
title="Real-world distances for the sides of quadrilateral for the defined zone.",
description="Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set.",
)
inertia: int = Field(
default=3,
title="Number of consecutive frames required for object to be considered present in the zone.",
gt=0,
description="Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections.",
)
loitering_time: int = Field(
default=0,
ge=0,
title="Number of seconds that an object must loiter to be considered in the zone.",
description="Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection.",
)
speed_threshold: Optional[float] = Field(
default=None,
ge=0.1,
title="Minimum speed value for an object to be considered in the zone.",
description="Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers.",
)
objects: Union[str, list[str]] = Field(
default_factory=list,
title="List of objects that can trigger the zone.",
description="List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered.",
)
_color: Optional[tuple[int, int, int]] = PrivateAttr()
_contour: np.ndarray = PrivateAttr()

View File

@ -8,13 +8,21 @@ __all__ = ["CameraGroupConfig"]
class CameraGroupConfig(FrigateBaseModel):
"""Represents a group of cameras."""
cameras: Union[str, list[str]] = Field(
default_factory=list, title="List of cameras in this group."
default_factory=list,
title="List of cameras in this group",
description="Array of camera names included in this group.",
)
icon: str = Field(
default="generic",
title="Icon that represents camera group",
description="Icon used to represent the camera group in the UI.",
)
order: int = Field(
default=0,
title="Sort order for group",
description="Numeric order used to sort camera groups in the UI; larger numbers appear later.",
)
icon: str = Field(default="generic", title="Icon that represents camera group.")
order: int = Field(default=0, title="Sort order for group.")
@field_validator("cameras", mode="before")
@classmethod

View File

@ -43,28 +43,43 @@ class ObjectClassificationType(str, Enum):
class AudioTranscriptionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable audio transcription.")
enabled: bool = Field(
default=False,
title="Enable audio transcription",
description="Enable or disable automatic audio transcription globally.",
)
language: str = Field(
default="en",
title="Language abbreviation to use for audio event transcription/translation.",
title="Language abbreviation to use for audio event transcription/translation",
description="Language code used for transcription/translation (for example 'en' for English).",
)
device: Optional[EnrichmentsDeviceEnum] = Field(
default=EnrichmentsDeviceEnum.CPU,
title="The device used for audio transcription.",
title="The device used for audio transcription",
description="Device key (CPU/GPU) to run the transcription model on.",
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
default="small",
title="The size of the embeddings model used",
description="Model size to use for transcription; the small model runs on CPU, large model requires a GPU.",
)
live_enabled: Optional[bool] = Field(
default=False, title="Enable live transcriptions."
default=False,
title="Enable live transcriptions",
description="Enable streaming live transcription for audio as it is received.",
)
class BirdClassificationConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable bird classification.")
enabled: bool = Field(
default=False,
title="Enable bird classification",
description="Enable or disable bird classification.",
)
threshold: float = Field(
default=0.9,
title="Minimum classification score required to be considered a match.",
title="Minimum classification score required to be considered a match",
description="Minimum classification score required to accept a bird classification.",
gt=0.0,
le=1.0,
)
@ -72,42 +87,61 @@ class BirdClassificationConfig(FrigateBaseModel):
class CustomClassificationStateCameraConfig(FrigateBaseModel):
crop: list[float, float, float, float] = Field(
title="Crop of image frame on this camera to run classification on."
title="Crop of image frame on this camera to run classification on",
description="Crop coordinates to use for running classification on this camera.",
)
class CustomClassificationStateConfig(FrigateBaseModel):
cameras: Dict[str, CustomClassificationStateCameraConfig] = Field(
title="Cameras to run classification on."
title="Cameras to run classification on",
description="Per-camera crop and settings for running state classification.",
)
motion: bool = Field(
default=False,
title="If classification should be run when motion is detected in the crop.",
title="If classification should be run when motion is detected in the crop",
description="If true, run classification when motion is detected within the specified crop.",
)
interval: int | None = Field(
default=None,
title="Interval to run classification on in seconds.",
title="Interval to run classification on in seconds",
description="Interval (seconds) between periodic classification runs for state classification.",
gt=0,
)
class CustomClassificationObjectConfig(FrigateBaseModel):
objects: list[str] = Field(title="Object types to classify.")
objects: list[str] = Field(
title="Object types to classify",
description="List of object types to run object classification on.",
)
classification_type: ObjectClassificationType = Field(
default=ObjectClassificationType.sub_label,
title="Type of classification that is applied.",
title="Type of classification that is applied",
description="Classification type applied: 'sub_label' (adds sub_label) or other supported types.",
)
class CustomClassificationConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable running the model.")
name: str | None = Field(default=None, title="Name of classification model.")
enabled: bool = Field(
default=True,
title="Enable running the model",
description="Enable or disable the custom classification model.",
)
name: str | None = Field(
default=None,
title="Name of classification model",
description="Identifier for the custom classification model to use.",
)
threshold: float = Field(
default=0.8, title="Classification score threshold to change the state."
default=0.8,
title="Classification score threshold to change the state",
description="Score threshold used to change the classification state.",
)
save_attempts: int | None = Field(
default=None,
title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification.",
title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification",
description="How many classification attempts to save for recent classifications UI.",
ge=0,
)
object_config: CustomClassificationObjectConfig | None = Field(default=None)
@ -116,47 +150,76 @@ class CustomClassificationConfig(FrigateBaseModel):
class ClassificationConfig(FrigateBaseModel):
bird: BirdClassificationConfig = Field(
default_factory=BirdClassificationConfig, title="Bird classification config."
default_factory=BirdClassificationConfig,
title="Bird classification config",
description="Settings specific to bird classification models.",
)
custom: Dict[str, CustomClassificationConfig] = Field(
default={}, title="Custom Classification Model Configs."
default={},
title="Custom Classification Models",
description="Configuration for custom classification models used for objects or state detection.",
)
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
enabled: bool = Field(
default=False,
title="Enable semantic search",
description="Enable or disable the semantic search feature.",
)
reindex: Optional[bool] = Field(
default=False, title="Reindex all tracked objects on startup."
default=False,
title="Reindex all tracked objects on startup",
description="Trigger a full reindex of historical tracked objects into the embeddings database.",
)
model: Optional[SemanticSearchModelEnum] = Field(
default=SemanticSearchModelEnum.jinav1,
title="The CLIP model to use for semantic search.",
title="The CLIP model to use for semantic search",
description="The embeddings model to use for semantic search (for example 'jinav1').",
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
default="small",
title="The size of the embeddings model used",
description="Select model size; 'small' runs on CPU and 'large' typically requires GPU.",
)
device: Optional[str] = Field(
default=None,
title="The device key to use for semantic search.",
title="The device key to use for semantic search",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
)
class TriggerConfig(FrigateBaseModel):
friendly_name: Optional[str] = Field(
None, title="Trigger friendly name used in the Frigate UI."
None,
title="Trigger friendly name used in the Frigate UI",
description="Optional friendly name displayed in the UI for this trigger.",
)
enabled: bool = Field(
default=True,
title="Enable this trigger",
description="Enable or disable this semantic search trigger.",
)
type: TriggerType = Field(
default=TriggerType.DESCRIPTION,
title="Type of trigger",
description="Type of trigger: 'thumbnail' (match against image) or 'description' (match against text).",
)
data: str = Field(
title="Trigger content (text phrase or image ID)",
description="Text phrase or thumbnail ID to match against tracked objects.",
)
enabled: bool = Field(default=True, title="Enable this trigger")
type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger")
data: str = Field(title="Trigger content (text phrase or image ID)")
threshold: float = Field(
title="Confidence score required to run the trigger",
description="Minimum similarity score (0-1) required to activate this trigger.",
default=0.8,
gt=0.0,
le=1.0,
)
actions: List[TriggerAction] = Field(
default=[], title="Actions to perform when trigger is matched"
default=[],
title="Actions to perform when trigger is matched",
description="List of actions to execute when trigger matches (notification, sub_label, attribute).",
)
model_config = ConfigDict(extra="forbid", protected_namespaces=())
@ -166,62 +229,84 @@ class CameraSemanticSearchConfig(FrigateBaseModel):
triggers: Dict[str, TriggerConfig] = Field(
default={},
title="Trigger actions on tracked objects that match existing thumbnails or descriptions",
description="Actions and matching criteria for camera-specific semantic search triggers.",
)
model_config = ConfigDict(extra="forbid", protected_namespaces=())
class FaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
enabled: bool = Field(
default=False,
title="Enable face recognition",
description="Enable or disable face recognition globally.",
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
default="small",
title="The size of the embeddings model used",
description="Model size to use for face embeddings (small/large); larger may require GPU.",
)
unknown_score: float = Field(
title="Minimum face distance score required to be marked as a potential match.",
title="Minimum face distance score required to be marked as a potential match",
description="Distance threshold below which a face is considered a potential match (lower = stricter).",
default=0.8,
gt=0.0,
le=1.0,
)
detection_threshold: float = Field(
default=0.7,
title="Minimum face detection score required to be considered a face.",
title="Minimum face detection score required to be considered a face",
description="Minimum detection confidence required to consider a face detection valid.",
gt=0.0,
le=1.0,
)
recognition_threshold: float = Field(
default=0.9,
title="Minimum face distance score required to be considered a match.",
title="Minimum face distance score required to be considered a match",
description="Face embedding distance threshold to consider two faces a match.",
gt=0.0,
le=1.0,
)
min_area: int = Field(
default=750, title="Min area of face box to consider running face recognition."
default=750,
title="Min area of face box to consider running face recognition",
description="Minimum area (pixels) of a detected face box required to attempt recognition.",
)
min_faces: int = Field(
default=1,
gt=0,
le=6,
title="Min face recognitions for the sub label to be applied to the person object.",
title="Min face recognitions for the sub label to be applied to the person object",
description="Minimum number of face recognitions required before applying a recognized sub-label to a person.",
)
save_attempts: int = Field(
default=200,
ge=0,
title="Number of face attempts to save in the recent recognitions tab.",
title="Number of face attempts to save in the recent recognitions tab",
description="Number of face recognition attempts to retain for recent recognition UI.",
)
blur_confidence_filter: bool = Field(
default=True, title="Apply blur quality filter to face confidence."
default=True,
title="Apply blur quality filter to face confidence",
description="Adjust confidence scores based on image blur to reduce false positives for poor quality faces.",
)
device: Optional[str] = Field(
default=None,
title="The device key to use for face recognition.",
title="The device key to use for face recognition",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
)
class CameraFaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
enabled: bool = Field(
default=False,
title="Enable face recognition",
description="Enable or disable face recognition globally.",
)
min_area: int = Field(
default=750, title="Min area of face box to consider running face recognition."
default=750,
title="Min area of face box to consider running face recognition",
description="Minimum area (pixels) of a detected face box required to attempt recognition.",
)
model_config = ConfigDict(extra="forbid", protected_namespaces=())
@ -235,77 +320,101 @@ class ReplaceRule(FrigateBaseModel):
class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
enabled: bool = Field(
default=False,
title="Enable license plate recognition",
description="Enable or disable LPR globally; camera-level settings can override.",
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
default="small",
title="The size of the embeddings model used",
description="Model size used for text detection/recognition; small runs on CPU, large on GPU.",
)
detection_threshold: float = Field(
default=0.7,
title="License plate object confidence score required to begin running recognition.",
title="License plate object confidence score required to begin running recognition",
description="Detection confidence threshold to begin running OCR on a suspected plate.",
gt=0.0,
le=1.0,
)
min_area: int = Field(
default=1000,
title="Minimum area of license plate to begin running recognition.",
title="Minimum area of license plate to begin running recognition",
description="Minimum plate area (pixels) required to attempt recognition.",
)
recognition_threshold: float = Field(
default=0.9,
title="Recognition confidence score required to add the plate to the object as a sub label.",
title="Recognition confidence score required to add the plate to the object as a sub label",
description="Confidence threshold required for recognized plate text to be attached as a sub-label.",
gt=0.0,
le=1.0,
)
min_plate_length: int = Field(
default=4,
title="Minimum number of characters a license plate must have to be added to the object as a sub label.",
title="Minimum number of characters a license plate must have to be added to the object as a sub label",
description="Minimum number of characters a recognized plate must contain to be considered valid.",
)
format: Optional[str] = Field(
default=None,
title="Regular expression for the expected format of license plate.",
title="Regular expression for the expected format of license plate",
description="Optional regex to validate recognized plate strings against an expected format.",
)
match_distance: int = Field(
default=1,
title="Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate.",
title="Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate",
description="Number of character mismatches allowed when comparing detected plates to known plates.",
ge=0,
)
known_plates: Optional[Dict[str, List[str]]] = Field(
default={}, title="Known plates to track (strings or regular expressions)."
default={},
title="Known plates to track (strings or regular expressions)",
description="List of plates or regexes to specially track or alert on.",
)
enhancement: int = Field(
default=0,
title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
title="Amount of contrast adjustment and denoising to apply to license plate images before recognition",
description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results.",
ge=0,
le=10,
)
debug_save_plates: bool = Field(
default=False,
title="Save plates captured for LPR for debugging purposes.",
title="Save plates captured for LPR for debugging purposes",
description="Save plate crop images for debugging LPR performance.",
)
device: Optional[str] = Field(
default=None,
title="The device key to use for LPR.",
title="The device key to use for LPR",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
)
replace_rules: List[ReplaceRule] = Field(
default_factory=list,
title="List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'.",
title="List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'",
description="Regex replacement rules used to normalize detected plate strings before matching.",
)
class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
enabled: bool = Field(
default=False,
title="Enable license plate recognition",
description="Enable or disable LPR globally; camera-level settings can override.",
)
expire_time: int = Field(
default=3,
title="Expire plates not seen after number of seconds (for dedicated LPR cameras only).",
title="Expire plates not seen after number of seconds (for dedicated LPR cameras only)",
description="Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only).",
gt=0,
)
min_area: int = Field(
default=1000,
title="Minimum area of license plate to begin running recognition.",
title="Minimum area of license plate to begin running recognition",
description="Minimum plate area (pixels) required to attempt recognition.",
)
enhancement: int = Field(
default=0,
title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
title="Amount of contrast adjustment and denoising to apply to license plate images before recognition",
description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results.",
ge=0,
le=10,
)
@ -314,12 +423,18 @@ class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
class CameraAudioTranscriptionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable audio transcription.")
enabled: bool = Field(
default=False,
title="Enable audio transcription",
description="Enable or disable automatic audio transcription.",
)
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of audio transcription."
)
live_enabled: Optional[bool] = Field(
default=False, title="Enable live transcriptions."
default=False,
title="Enable live transcriptions",
description="Enable streaming live transcription for audio as it is received.",
)
model_config = ConfigDict(extra="forbid", protected_namespaces=())

View File

@ -299,116 +299,189 @@ def verify_lpr_and_face(
class FrigateConfig(FrigateBaseModel):
version: Optional[str] = Field(default=None, title="Current config version.")
version: Optional[str] = Field(
default=None,
title="Current config version",
description="Numeric or string version of the active configuration to help detect migrations or format changes.",
)
safe_mode: bool = Field(
default=False, title="If Frigate should be started in safe mode."
default=False,
title="If Frigate should be started in safe mode",
description="When enabled, start Frigate in safe mode with reduced features for troubleshooting.",
)
# Fields that install global state should be defined first, so that their validators run first.
environment_vars: EnvVars = Field(
default_factory=dict, title="Frigate environment variables."
default_factory=dict,
title="Frigate environment variables",
description="Key/value pairs of environment variables to set for the Frigate process.",
)
logger: LoggerConfig = Field(
default_factory=LoggerConfig,
title="Logging configuration.",
title="Logging",
description="Controls default log verbosity and per-component log level overrides.",
validate_default=True,
)
# Global config
auth: AuthConfig = Field(default_factory=AuthConfig, title="Auth configuration.")
auth: AuthConfig = Field(
default_factory=AuthConfig,
title="Authentication",
description="Authentication and session-related settings including cookie and rate limit options.",
)
database: DatabaseConfig = Field(
default_factory=DatabaseConfig, title="Database configuration."
default_factory=DatabaseConfig,
title="Database",
description="Settings for the SQLite database used by Frigate to store tracked object and recording metadata.",
)
go2rtc: RestreamConfig = Field(
default_factory=RestreamConfig, title="Global restream configuration."
default_factory=RestreamConfig,
title="go2rtc",
description="Settings for the integrated go2rtc restreaming service used for live stream relaying and translation.",
)
mqtt: MqttConfig = Field(
title="MQTT",
description="Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.",
)
mqtt: MqttConfig = Field(title="MQTT configuration.")
notifications: NotificationConfig = Field(
default_factory=NotificationConfig, title="Global notification configuration."
default_factory=NotificationConfig,
title="Notifications",
description="Settings to enable and control notifications; can be overridden per-camera.",
)
networking: NetworkingConfig = Field(
default_factory=NetworkingConfig, title="Networking configuration"
default_factory=NetworkingConfig,
title="Networking",
description="Network-related settings such as IPv6 enablement for Frigate endpoints.",
)
proxy: ProxyConfig = Field(
default_factory=ProxyConfig, title="Proxy configuration."
default_factory=ProxyConfig,
title="Proxy",
description="Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
)
telemetry: TelemetryConfig = Field(
default_factory=TelemetryConfig, title="Telemetry configuration."
default_factory=TelemetryConfig,
title="Telemetry",
description="System telemetry and stats options including GPU and network bandwidth monitoring.",
)
tls: TlsConfig = Field(
default_factory=TlsConfig,
title="TLS",
description="TLS settings for Frigate's web endpoints (port 8971).",
)
ui: UIConfig = Field(
default_factory=UIConfig,
title="UI",
description="User interface preferences such as timezone, time/date formatting, and units.",
)
tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config
detectors: Dict[str, BaseDetectorConfig] = Field(
default=DEFAULT_DETECTORS,
title="Detector hardware configuration.",
title="Detector hardware",
description="Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
)
model: ModelConfig = Field(
default_factory=ModelConfig, title="Detection model configuration."
default_factory=ModelConfig,
title="Detection model",
description="Settings to configure a custom object detection model and its input shape.",
)
# GenAI config (named provider configs: name -> GenAIConfig)
genai: Dict[str, GenAIConfig] = Field(
default_factory=dict, title="Generative AI configuration (named providers)."
# GenAI config
genai: GenAIConfig = Field(
default_factory=GenAIConfig,
title="Generative AI",
description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
)
# Camera config
cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.")
cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras")
audio: AudioConfig = Field(
default_factory=AudioConfig, title="Global Audio events configuration."
default_factory=AudioConfig,
title="Audio events",
description="Settings for audio-based event detection; can be overridden per-camera.",
)
birdseye: BirdseyeConfig = Field(
default_factory=BirdseyeConfig, title="Birdseye configuration."
default_factory=BirdseyeConfig,
title="Birdseye",
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
)
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Global object tracking configuration."
default_factory=DetectConfig,
title="Object Detection",
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
)
ffmpeg: FfmpegConfig = Field(
default_factory=FfmpegConfig, title="Global FFmpeg configuration."
default_factory=FfmpegConfig,
title="FFmpeg",
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
default_factory=CameraLiveConfig,
title="Live playback",
description="Settings used by the Web UI to control live stream resolution and quality.",
)
motion: Optional[MotionConfig] = Field(
default=None, title="Global motion detection configuration."
default=None,
title="Motion detection",
description="Default motion detection settings applied to cameras unless overridden per-camera.",
)
objects: ObjectConfig = Field(
default_factory=ObjectConfig, title="Global object configuration."
default_factory=ObjectConfig,
title="Objects",
description="Object tracking defaults including which labels to track and per-object filters.",
)
record: RecordConfig = Field(
default_factory=RecordConfig, title="Global record configuration."
default_factory=RecordConfig,
title="Recording",
description="Recording and retention settings applied to cameras unless overridden per-camera.",
)
review: ReviewConfig = Field(
default_factory=ReviewConfig, title="Review configuration."
default_factory=ReviewConfig,
title="Review",
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
)
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Global snapshots configuration."
default_factory=SnapshotsConfig,
title="Snapshots",
description="Settings for saved JPEG snapshots of tracked objects; can be overridden per-camera.",
)
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig,
title="Global timestamp style configuration.",
title="Timestamp style",
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
)
# Classification Config
audio_transcription: AudioTranscriptionConfig = Field(
default_factory=AudioTranscriptionConfig, title="Audio transcription config."
default_factory=AudioTranscriptionConfig,
title="Audio transcription",
description="Settings for live and speech audio transcription used for events and live captions.",
)
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
default_factory=ClassificationConfig,
title="Object classification",
description="Settings for classification models used to refine object labels or state classification.",
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
default_factory=SemanticSearchConfig,
title="Semantic Search",
description="Settings for Semantic Search which builds and queries object embeddings to find similar items.",
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
default_factory=FaceRecognitionConfig,
title="Face recognition",
description="Settings for face detection and recognition; can be overridden per-camera.",
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
title="License Plate Recognition",
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
)
camera_groups: Dict[str, CameraGroupConfig] = Field(
default_factory=dict, title="Camera group configuration"
default_factory=dict,
title="Camera groups",
description="Configuration for named camera groups used to organize cameras in the UI.",
)
_plus_api: PlusApi

View File

@ -8,4 +8,8 @@ __all__ = ["DatabaseConfig"]
class DatabaseConfig(FrigateBaseModel):
path: str = Field(default=DEFAULT_DB_PATH, title="Database path.") # noqa: F821
path: str = Field(
default=DEFAULT_DB_PATH,
title="Database path",
description="Filesystem path where the Frigate SQLite database file will be stored.",
) # noqa: F821

View File

@ -9,9 +9,15 @@ __all__ = ["LoggerConfig"]
class LoggerConfig(FrigateBaseModel):
default: LogLevel = Field(default=LogLevel.info, title="Default logging level.")
default: LogLevel = Field(
default=LogLevel.info,
title="Default logging level",
description="Default global log verbosity (debug, info, warning, error).",
)
logs: dict[str, LogLevel] = Field(
default_factory=dict, title="Log level for specified processes."
default_factory=dict,
title="Log level for specified processes",
description="Per-component log level overrides to increase or decrease verbosity for specific modules.",
)
@model_validator(mode="after")

View File

@ -12,25 +12,73 @@ __all__ = ["MqttConfig"]
class MqttConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable MQTT Communication.")
host: str = Field(default="", title="MQTT Host")
port: int = Field(default=1883, title="MQTT Port")
topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix")
client_id: str = Field(default="frigate", title="MQTT Client ID")
enabled: bool = Field(
default=True,
title="Enable MQTT communication",
description="Enable or disable MQTT integration for state, events, and snapshots.",
)
host: str = Field(
default="",
title="MQTT host",
description="Hostname or IP address of the MQTT broker.",
)
port: int = Field(
default=1883,
title="MQTT port",
description="Port of the MQTT broker (usually 1883 for plain MQTT).",
)
topic_prefix: str = Field(
default="frigate",
title="MQTT topic prefix",
description="MQTT topic prefix for all Frigate topics; must be unique if running multiple instances.",
)
client_id: str = Field(
default="frigate",
title="MQTT client ID",
description="Client identifier used when connecting to the MQTT broker; should be unique per instance.",
)
stats_interval: int = Field(
default=60, ge=FREQUENCY_STATS_POINTS, title="MQTT Camera Stats Interval"
default=60,
ge=FREQUENCY_STATS_POINTS,
title="MQTT camera stats interval",
description="Interval in seconds for publishing system and camera stats to MQTT.",
)
user: Optional[EnvString] = Field(
default=None,
title="MQTT username",
description="Optional MQTT username; can be provided via environment variables or secrets.",
)
user: Optional[EnvString] = Field(default=None, title="MQTT Username")
password: Optional[EnvString] = Field(
default=None, title="MQTT Password", validate_default=True
default=None,
title="MQTT password",
description="Optional MQTT password; can be provided via environment variables or secrets.",
validate_default=True,
)
tls_ca_certs: Optional[str] = Field(
default=None,
title="MQTT TLS CA certificates",
description="Path to CA certificate for TLS connections to the broker (for self-signed certs).",
)
tls_ca_certs: Optional[str] = Field(default=None, title="MQTT TLS CA Certificates")
tls_client_cert: Optional[str] = Field(
default=None, title="MQTT TLS Client Certificate"
default=None,
title="MQTT TLS client certificate",
description="Client certificate path for TLS mutual authentication; do not set user/password when using client certs.",
)
tls_client_key: Optional[str] = Field(
default=None,
title="MQTT TLS client key",
description="Private key path for the client certificate.",
)
tls_insecure: Optional[bool] = Field(
default=None,
title="MQTT TLS insecure",
description="Allow insecure TLS connections by skipping hostname verification (not recommended).",
)
qos: int = Field(
default=0,
title="MQTT QoS",
description="Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2).",
)
tls_client_key: Optional[str] = Field(default=None, title="MQTT TLS Client Key")
tls_insecure: Optional[bool] = Field(default=None, title="MQTT TLS Insecure")
qos: int = Field(default=0, title="MQTT QoS")
@model_validator(mode="after")
def user_requires_pass(self, info: ValidationInfo) -> Self:

View File

@ -8,15 +8,23 @@ __all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"]
class IPv6Config(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971")
enabled: bool = Field(
default=False,
title="Enable IPv6 for port 5000 and/or 8971",
description="Enable IPv6 support for Frigate services (API and UI) where applicable.",
)
class ListenConfig(FrigateBaseModel):
internal: Union[int, str] = Field(
default=5000, title="Internal listening port for Frigate"
default=5000,
title="Internal port",
description="Internal listening port for Frigate (default 5000).",
)
external: Union[int, str] = Field(
default=8971, title="External listening port for Frigate"
default=8971,
title="External port",
description="External listening port for Frigate (default 8971).",
)

View File

@ -10,36 +10,47 @@ __all__ = ["ProxyConfig", "HeaderMappingConfig"]
class HeaderMappingConfig(FrigateBaseModel):
user: str = Field(
default=None, title="Header name from upstream proxy to identify user."
default=None,
title="Header name from upstream proxy to identify user",
description="Header containing the authenticated username provided by the upstream proxy.",
)
role: str = Field(
default=None,
title="Header name from upstream proxy to identify user role.",
title="Header name from upstream proxy to identify user role",
description="Header containing the authenticated user's role or groups from the upstream proxy.",
)
role_map: Optional[dict[str, list[str]]] = Field(
default_factory=dict,
title=("Mapping of Frigate roles to upstream group values. "),
description="Map upstream group values to Frigate roles (for example map admin groups to the admin role).",
)
class ProxyConfig(FrigateBaseModel):
header_map: HeaderMappingConfig = Field(
default_factory=HeaderMappingConfig,
title="Header mapping definitions for proxy user passing.",
title="Header mapping definitions for proxy user passing",
description="Map incoming proxy headers to Frigate user and role fields for proxy-based auth.",
)
logout_url: Optional[str] = Field(
default=None, title="Redirect url for logging out with proxy."
default=None,
title="Redirect url for logging out with proxy",
description="URL to redirect users to when logging out via the proxy.",
)
auth_secret: Optional[EnvString] = Field(
default=None,
title="Secret value for proxy authentication.",
title="Secret value for proxy authentication",
description="Optional secret checked against the X-Proxy-Secret header to verify trusted proxies.",
)
default_role: Optional[str] = Field(
default="viewer", title="Default role for proxy users."
default="viewer",
title="Default role for proxy users",
description="Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer).",
)
separator: Optional[str] = Field(
default=",",
title="The character used to separate values in a mapped header.",
title="The character used to separate values in a mapped header",
description="Character used to split multiple values provided in proxy headers.",
)
@field_validator("separator", mode="before")

View File

@ -8,22 +8,41 @@ __all__ = ["TelemetryConfig", "StatsConfig"]
class StatsConfig(FrigateBaseModel):
    # Per-subsystem switches for hardware/system statistics collection.
    amd_gpu_stats: bool = Field(
        default=True,
        title="Enable AMD GPU stats",
        description="Enable collection of AMD GPU statistics if an AMD GPU is present.",
    )
    intel_gpu_stats: bool = Field(
        default=True,
        title="Enable Intel GPU stats",
        description="Enable collection of Intel GPU statistics if an Intel GPU is present.",
    )
    network_bandwidth: bool = Field(
        default=False,
        title="Enable network bandwidth for ffmpeg processes",
        description="Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities).",
    )
    # Optional device override used for the Intel SR-IOV stats path.
    intel_gpu_device: Optional[str] = Field(
        default=None,
        title="Define the device to use when gathering SR-IOV stats",
        description="Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats.",
    )
class TelemetryConfig(FrigateBaseModel):
    # Top-level telemetry options: bandwidth interfaces, stats toggles, and
    # the update check.
    # Fix 1: the source span declared `version_check` twice (diff residue);
    # a single declaration is kept.
    # Fix 2: `network_interfaces` used a mutable literal `default=[]`;
    # `default_factory=list` is the idiomatic, behavior-identical form.
    network_interfaces: list[str] = Field(
        default_factory=list,
        title="Enabled network interfaces for bandwidth calculation",
        description="List of network interface name prefixes to monitor for bandwidth statistics.",
    )
    stats: StatsConfig = Field(
        default_factory=StatsConfig,
        title="System Stats",
        description="Options to enable/disable collection of various system and GPU statistics.",
    )
    version_check: bool = Field(
        default=True,
        title="Enable latest version check",
        description="Enable an outbound check to detect if a newer Frigate version is available.",
    )

View File

@ -6,4 +6,8 @@ __all__ = ["TlsConfig"]
class TlsConfig(FrigateBaseModel):
    # Single switch controlling TLS on the external (8971) port.
    enabled: bool = Field(
        default=True,
        title="Enable TLS for port 8971",
        description="Enable TLS for Frigate's web UI and API on the configured TLS port.",
    )

View File

@ -27,16 +27,28 @@ class UnitSystemEnum(str, Enum):
class UIConfig(FrigateBaseModel):
    # Display-only preferences: timezone, time/date formatting, and units.
    timezone: Optional[str] = Field(
        default=None,
        title="Override UI timezone",
        description="Optional timezone to display across the UI (defaults to browser local time if unset).",
    )
    time_format: TimeFormatEnum = Field(
        default=TimeFormatEnum.browser,
        title="Override UI time format",
        description="Time format to use in the UI (browser, 12hour, or 24hour).",
    )
    # date_style/time_style follow the Intl dateStyle/timeStyle naming.
    date_style: DateTimeStyleEnum = Field(
        default=DateTimeStyleEnum.short,
        title="Override UI dateStyle",
        description="Date style to use in the UI (full, long, medium, short).",
    )
    time_style: DateTimeStyleEnum = Field(
        default=DateTimeStyleEnum.medium,
        title="Override UI timeStyle",
        description="Time style to use in the UI (full, long, medium, short).",
    )
    unit_system: UnitSystemEnum = Field(
        default=UnitSystemEnum.metric,
        title="The unit system to use for measurements",
        description="Unit system for display (metric or imperial) used in the UI and MQTT.",
    )

View File

@ -45,30 +45,55 @@ class ModelTypeEnum(str, Enum):
class ModelConfig(BaseModel):
path: Optional[str] = Field(None, title="Custom Object detection model path.")
labelmap_path: Optional[str] = Field(
None, title="Label map for custom object detector."
path: Optional[str] = Field(
None,
title="Custom Object detection model path",
description="Path to a custom detection model file (or plus://<model_id> for Frigate+ models).",
)
labelmap_path: Optional[str] = Field(
None,
title="Label map for custom object detector",
description="Path to a labelmap file that maps numeric classes to string labels for the detector.",
)
width: int = Field(
default=320,
title="Object detection model input width",
description="Width of the model input tensor in pixels.",
)
height: int = Field(
default=320,
title="Object detection model input height",
description="Height of the model input tensor in pixels.",
)
width: int = Field(default=320, title="Object detection model input width.")
height: int = Field(default=320, title="Object detection model input height.")
labelmap: Dict[int, str] = Field(
default_factory=dict, title="Labelmap customization."
default_factory=dict,
title="Labelmap customization",
description="Overrides or remapping entries to merge into the standard labelmap.",
)
attributes_map: Dict[str, list[str]] = Field(
default=DEFAULT_ATTRIBUTE_LABEL_MAP,
title="Map of object labels to their attribute labels.",
title="Map of object labels to their attribute labels",
description="Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate']).",
)
input_tensor: InputTensorEnum = Field(
default=InputTensorEnum.nhwc, title="Model Input Tensor Shape"
default=InputTensorEnum.nhwc,
title="Model Input Tensor Shape",
description="Tensor format expected by the model: 'nhwc' or 'nchw'.",
)
input_pixel_format: PixelFormatEnum = Field(
default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
default=PixelFormatEnum.rgb,
title="Model Input Pixel Color Format",
description="Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'.",
)
input_dtype: InputDTypeEnum = Field(
default=InputDTypeEnum.int, title="Model Input D Type"
default=InputDTypeEnum.int,
title="Model Input D Type",
description="Data type of the model input tensor (for example 'float32').",
)
model_type: ModelTypeEnum = Field(
default=ModelTypeEnum.ssd, title="Object Detection Model Type"
default=ModelTypeEnum.ssd,
title="Object Detection Model Type",
description="Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization.",
)
_merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
_colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
@ -210,12 +235,20 @@ class ModelConfig(BaseModel):
class BaseDetectorConfig(BaseModel):
# the type field must be defined in all subclasses
type: str = Field(default="cpu", title="Detector Type")
type: str = Field(
default="cpu",
title="Detector Type",
description="Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino').",
)
model: Optional[ModelConfig] = Field(
default=None, title="Detector specific model configuration."
default=None,
title="Detector specific model configuration",
description="Detector-specific model configuration options (path, input size, etc.).",
)
model_path: Optional[str] = Field(
default=None, title="Detector specific model path."
default=None,
title="Detector specific model path",
description="File path to the detector model binary if required by the chosen detector.",
)
model_config = ConfigDict(
extra="allow", arbitrary_types_allowed=True, protected_namespaces=()