Separate out face recognition from semantic search

Nicolas Mowen 2024-10-23 08:49:07 -06:00
parent 85c817d1ce
commit 539a886ab0
4 changed files with 32 additions and 23 deletions
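
The net effect, sketched as hypothetical user-facing config dicts (key names match the fields changed below; the surrounding structure is assumed): face recognition moves from a nested semantic_search.face_recognition section to its own top-level face_recognition section.

# Before this commit (assumed layout, nested under semantic_search):
before = {"semantic_search": {"enabled": True, "face_recognition": {"enabled": True}}}

# After this commit (top-level section, validated by the new dependency check):
after = {"semantic_search": {"enabled": True}, "face_recognition": {"enabled": True}}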

View File

@@ -56,7 +56,7 @@ from .logger import LoggerConfig
from .mqtt import MqttConfig
from .notification import NotificationConfig
from .proxy import ProxyConfig
from .semantic_search import SemanticSearchConfig
from .semantic_search import FaceRecognitionConfig, SemanticSearchConfig
from .telemetry import TelemetryConfig
from .tls import TlsConfig
from .ui import UIConfig
@@ -159,6 +159,16 @@ class RestreamConfig(BaseModel):
model_config = ConfigDict(extra="allow")
def verify_semantic_search_dependent_configs(config: FrigateConfig) -> None:
"""Verify that semantic search is enabled if required features are enabled."""
if not config.semantic_search.enabled:
if config.genai.enabled:
raise ValueError("Genai requires semantic search to be enabled.")
if config.face_recognition.enabled:
raise ValueError("Face recognition requires semantic to be enabled.")
def verify_config_roles(camera_config: CameraConfig) -> None:
"""Verify that roles are setup in the config correctly."""
assigned_roles = list(
@@ -316,6 +326,9 @@ class FrigateConfig(FrigateBaseModel):
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config
@@ -621,6 +634,7 @@ class FrigateConfig(FrigateBaseModel):
detector_config.model.compute_model_hash()
self.detectors[key] = detector_config
verify_semantic_search_dependent_configs(self)
return self
@field_validator("cameras")
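
A runnable sketch of the new verify_semantic_search_dependent_configs check added above, using stand-in dataclasses instead of the real pydantic models so it executes on its own:

from dataclasses import dataclass, field

@dataclass
class _Toggle:
    enabled: bool = False

@dataclass
class _StubConfig:
    # Stand-ins for FrigateConfig's semantic_search, genai, and face_recognition sections.
    semantic_search: _Toggle = field(default_factory=_Toggle)
    genai: _Toggle = field(default_factory=_Toggle)
    face_recognition: _Toggle = field(default_factory=_Toggle)

def verify_semantic_search_dependent_configs(config: _StubConfig) -> None:
    """Verify that semantic search is enabled if dependent features are enabled."""
    if not config.semantic_search.enabled:
        if config.genai.enabled:
            raise ValueError("Genai requires semantic search to be enabled.")
        if config.face_recognition.enabled:
            raise ValueError("Face recognition requires semantic search to be enabled.")

# Enabling face recognition without semantic search now fails at config load time:
try:
    verify_semantic_search_dependent_configs(_StubConfig(face_recognition=_Toggle(enabled=True)))
except ValueError as err:
    print(err)  # Face recognition requires semantic search to be enabled.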

View File

@@ -7,6 +7,16 @@ from .base import FrigateBaseModel
__all__ = ["FaceRecognitionConfig", "SemanticSearchConfig"]
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)
class FaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable face recognition.")
threshold: float = Field(
@@ -15,16 +25,3 @@ class FaceRecognitionConfig(FrigateBaseModel):
min_area: int = Field(
default=500, title="Min area of face box to consider running face recognition."
)
class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable semantic search.")
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)
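
With the two models now independent, each can be constructed on its own. A minimal sketch using only the fields visible in this diff (threshold is elided by the hunk boundary, so it is left at its default):

from frigate.config.semantic_search import FaceRecognitionConfig, SemanticSearchConfig

face = FaceRecognitionConfig(enabled=True, min_area=800)
search = SemanticSearchConfig(enabled=True, model_size="large")
print(face.enabled, face.min_area)  # True 800
print(search.reindex)               # False (default)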

View File

@@ -11,7 +11,7 @@ from numpy import ndarray
from playhouse.shortcuts import model_to_dict
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config.semantic_search import SemanticSearchConfig
from frigate.config import FrigateConfig
from frigate.const import (
CONFIG_DIR,
FACE_DIR,
@@ -63,7 +63,7 @@ class Embeddings:
"""SQLite-vec embeddings database."""
def __init__(
self, config: SemanticSearchConfig, db: SqliteVecQueueDatabase
self, config: FrigateConfig, db: SqliteVecQueueDatabase
) -> None:
self.config = config
self.db = db
@@ -76,7 +76,7 @@ class Embeddings:
"jinaai/jina-clip-v1-text_model_fp16.onnx",
"jinaai/jina-clip-v1-tokenizer",
"jinaai/jina-clip-v1-vision_model_fp16.onnx"
if config.model_size == "large"
if config.semantic_search.model_size == "large"
else "jinaai/jina-clip-v1-vision_model_quantized.onnx",
"jinaai/jina-clip-v1-preprocessor_config.json",
]
@@ -97,7 +97,7 @@ class Embeddings:
download_urls={
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
},
model_size=config.model_size,
model_size=config.semantic_search.model_size,
model_type=ModelTypeEnum.text,
requestor=self.requestor,
device="CPU",
@@ -105,7 +105,7 @@ class Embeddings:
model_file = (
"vision_model_fp16.onnx"
if self.config.model_size == "large"
if self.config.semantic_search.model_size == "large"
else "vision_model_quantized.onnx"
)
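
Embeddings now receives the whole FrigateConfig, so the model size is read through the nested semantic_search section. A standalone sketch of that selection logic with a stand-in config object:

from types import SimpleNamespace

def pick_vision_model(config) -> str:
    # config is assumed to expose semantic_search.model_size, as FrigateConfig does.
    if config.semantic_search.model_size == "large":
        return "vision_model_fp16.onnx"
    return "vision_model_quantized.onnx"

stub = SimpleNamespace(semantic_search=SimpleNamespace(model_size="small"))
print(pick_vision_model(stub))  # vision_model_quantized.onnx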

View File

@@ -47,7 +47,7 @@ class EmbeddingMaintainer(threading.Thread):
) -> None:
super().__init__(name="embeddings_maintainer")
self.config = config
self.embeddings = Embeddings(config.semantic_search, db)
self.embeddings = Embeddings(config, db)
# Check if we need to re-index events
if config.semantic_search.reindex:
@@ -62,9 +62,7 @@
self.frame_manager = SharedMemoryFrameManager()
# set face recognition conditions
self.face_recognition_enabled = (
self.config.semantic_search.face_recognition.enabled
)
self.face_recognition_enabled = self.config.face_recognition.enabled
self.requires_face_detection = "face" not in self.config.model.all_attributes
self.detected_faces: dict[str, float] = {}
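
The maintainer-side change is symmetric: the constructor passes the full config through to Embeddings, and the face recognition flag collapses to a single top-level attribute read. A stand-in sketch of the before/after access path:

from types import SimpleNamespace

config = SimpleNamespace(
    semantic_search=SimpleNamespace(enabled=True, reindex=False),
    face_recognition=SimpleNamespace(enabled=True),
)

# before: self.config.semantic_search.face_recognition.enabled
face_recognition_enabled = config.face_recognition.enabled
print(face_recognition_enabled)  # True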