Refactor genai config usage

This commit is contained in:
Nicolas Mowen 2025-08-08 08:50:30 -06:00
parent 5151a2516a
commit 3c62d9072c
4 changed files with 28 additions and 26 deletions

View File

@@ -744,10 +744,10 @@ class Dispatcher:
def _on_genai_command(self, camera_name: str, payload: str) -> None: def _on_genai_command(self, camera_name: str, payload: str) -> None:
"""Callback for GenAI topic.""" """Callback for GenAI topic."""
genai_settings = self.config.cameras[camera_name].genai genai_settings = self.config.cameras[camera_name].objects.genai
if payload == "ON": if payload == "ON":
if not self.config.cameras[camera_name].genai.enabled_in_config: if not self.config.cameras[camera_name].objects.genai.enabled_in_config:
logger.error( logger.error(
"GenAI must be enabled in the config to be turned on via MQTT." "GenAI must be enabled in the config to be turned on via MQTT."
) )

View File

@@ -28,7 +28,6 @@ from .audio import AudioConfig
from .birdseye import BirdseyeCameraConfig from .birdseye import BirdseyeCameraConfig
from .detect import DetectConfig from .detect import DetectConfig
from .ffmpeg import CameraFfmpegConfig, CameraInput from .ffmpeg import CameraFfmpegConfig, CameraInput
from .genai import GenAICameraConfig
from .live import CameraLiveConfig from .live import CameraLiveConfig
from .motion import MotionConfig from .motion import MotionConfig
from .mqtt import CameraMqttConfig from .mqtt import CameraMqttConfig
@@ -71,9 +70,6 @@ class CameraConfig(FrigateBaseModel):
default_factory=CameraFaceRecognitionConfig, title="Face recognition config." default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
) )
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.") ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
genai: GenAICameraConfig = Field(
default_factory=GenAICameraConfig, title="Generative AI configuration."
)
live: CameraLiveConfig = Field( live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings." default_factory=CameraLiveConfig, title="Live playback settings."
) )

View File

@@ -4,7 +4,7 @@ from pydantic import Field, PrivateAttr, field_serializer, field_validator
from ..base import FrigateBaseModel from ..base import FrigateBaseModel
__all__ = ["ObjectConfig", "FilterConfig"] __all__ = ["ObjectConfig", "GenAIObjectConfig", "FilterConfig"]
DEFAULT_TRACKED_OBJECTS = ["person"] DEFAULT_TRACKED_OBJECTS = ["person"]

View File

@@ -30,7 +30,7 @@ from frigate.comms.recordings_updater import (
RecordingsDataTypeEnum, RecordingsDataTypeEnum,
) )
from frigate.comms.review_updater import ReviewDataSubscriber from frigate.comms.review_updater import ReviewDataSubscriber
from frigate.config import FrigateConfig from frigate.config import CameraConfig, FrigateConfig
from frigate.config.camera.camera import CameraTypeEnum from frigate.config.camera.camera import CameraTypeEnum
from frigate.config.camera.updater import ( from frigate.config.camera.updater import (
CameraConfigUpdateEnum, CameraConfigUpdateEnum,
@@ -329,7 +329,10 @@ class EmbeddingMaintainer(threading.Thread):
camera_config = self.config.cameras[camera] camera_config = self.config.cameras[camera]
# no need to process updated objects if face recognition, lpr, genai are disabled # no need to process updated objects if face recognition, lpr, genai are disabled
if not camera_config.genai.enabled and len(self.realtime_processors) == 0: if (
not camera_config.objects.genai.enabled
and len(self.realtime_processors) == 0
):
return return
# Create our own thumbnail based on the bounding box and the frame time # Create our own thumbnail based on the bounding box and the frame time
@@ -367,23 +370,23 @@ class EmbeddingMaintainer(threading.Thread):
# check if we're configured to send an early request after a minimum number of updates received # check if we're configured to send an early request after a minimum number of updates received
if ( if (
self.genai_client is not None self.genai_client is not None
and camera_config.genai.send_triggers.after_significant_updates and camera_config.objects.genai.send_triggers.after_significant_updates
): ):
if ( if (
len(self.tracked_events.get(data["id"], [])) len(self.tracked_events.get(data["id"], []))
>= camera_config.genai.send_triggers.after_significant_updates >= camera_config.objects.genai.send_triggers.after_significant_updates
and data["id"] not in self.early_request_sent and data["id"] not in self.early_request_sent
): ):
if data["has_clip"] and data["has_snapshot"]: if data["has_clip"] and data["has_snapshot"]:
event: Event = Event.get(Event.id == data["id"]) event: Event = Event.get(Event.id == data["id"])
if ( if (
not camera_config.genai.objects not camera_config.objects.genai.objects
or event.label in camera_config.genai.objects or event.label in camera_config.objects.genai.objects
) and ( ) and (
not camera_config.genai.required_zones not camera_config.objects.genai.required_zones
or set(data["entered_zones"]) or set(data["entered_zones"])
& set(camera_config.genai.required_zones) & set(camera_config.objects.genai.required_zones)
): ):
logger.debug(f"{camera} sending early request to GenAI") logger.debug(f"{camera} sending early request to GenAI")
@@ -436,16 +439,17 @@ class EmbeddingMaintainer(threading.Thread):
# Run GenAI # Run GenAI
if ( if (
camera_config.genai.enabled camera_config.objects.genai.enabled
and camera_config.genai.send_triggers.tracked_object_end and camera_config.objects.genai.send_triggers.tracked_object_end
and self.genai_client is not None and self.genai_client is not None
and ( and (
not camera_config.genai.objects not camera_config.objects.genai.objects
or event.label in camera_config.genai.objects or event.label in camera_config.objects.genai.objects
) )
and ( and (
not camera_config.genai.required_zones not camera_config.objects.genai.required_zones
or set(event.zones) & set(camera_config.genai.required_zones) or set(event.zones)
& set(camera_config.objects.genai.required_zones)
) )
): ):
self._process_genai_description(event, camera_config, thumbnail) self._process_genai_description(event, camera_config, thumbnail)
@@ -624,8 +628,10 @@ class EmbeddingMaintainer(threading.Thread):
self.embeddings.embed_thumbnail(event_id, thumbnail) self.embeddings.embed_thumbnail(event_id, thumbnail)
def _process_genai_description(self, event, camera_config, thumbnail) -> None: def _process_genai_description(
if event.has_snapshot and camera_config.genai.use_snapshot: self, event: Event, camera_config: CameraConfig, thumbnail
) -> None:
if event.has_snapshot and camera_config.objects.genai.use_snapshot:
snapshot_image = self._read_and_crop_snapshot(event, camera_config) snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image: if not snapshot_image:
return return
@@ -637,7 +643,7 @@ class EmbeddingMaintainer(threading.Thread):
embed_image = ( embed_image = (
[snapshot_image] [snapshot_image]
if event.has_snapshot and camera_config.genai.use_snapshot if event.has_snapshot and camera_config.objects.genai.use_snapshot
else ( else (
[data["thumbnail"] for data in self.tracked_events[event.id]] [data["thumbnail"] for data in self.tracked_events[event.id]]
if num_thumbnails > 0 if num_thumbnails > 0
@@ -645,7 +651,7 @@ class EmbeddingMaintainer(threading.Thread):
) )
) )
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0: if camera_config.objects.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}") logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")
Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir( Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
@@ -775,7 +781,7 @@ class EmbeddingMaintainer(threading.Thread):
return return
camera_config = self.config.cameras[event.camera] camera_config = self.config.cameras[event.camera]
if not camera_config.genai.enabled and not force: if not camera_config.objects.genai.enabled and not force:
logger.error(f"GenAI not enabled for camera {event.camera}") logger.error(f"GenAI not enabled for camera {event.camera}")
return return