diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md
index 498d1ab67..a51301777 100644
--- a/docs/docs/configuration/semantic_search.md
+++ b/docs/docs/configuration/semantic_search.md
@@ -50,8 +50,8 @@ Differently weighted versions of the Jina models are available and can be select
 ```yaml
 semantic_search:
   enabled: True
-  local_model: "jinav1"
-  local_model_size: small
+  model: "jinav1"
+  model_size: small
 ```
 
 - Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
@@ -68,8 +68,8 @@ To use the V2 model, update the `model` parameter in your config:
 ```yaml
 semantic_search:
   enabled: True
-  local_model: "jinav2"
-  local_model_size: large
+  model: "jinav2"
+  model_size: large
 ```
 
 For most users, especially native English speakers, the V1 model remains the recommended choice.
@@ -123,7 +123,7 @@ The CLIP models are downloaded in ONNX format, and the `large` model can be acce
 ```yaml
 semantic_search:
   enabled: True
-  local_model_size: large
+  model_size: large
   # Optional, if using the 'large' model in a multi-GPU installation
   device: 0
 ```
diff --git a/frigate/config/classification.py b/frigate/config/classification.py
index e77eb7a3f..c2f03810d 100644
--- a/frigate/config/classification.py
+++ b/frigate/config/classification.py
@@ -159,11 +159,11 @@ class SemanticSearchConfig(FrigateBaseModel):
         default=SemanticSearchProviderEnum.local,
         title="The semantic search provider to use.",
     )
-    local_model: Optional[SemanticSearchModelEnum] = Field(
+    model: Optional[SemanticSearchModelEnum] = Field(
         default=SemanticSearchModelEnum.jinav1,
         title="The local CLIP model to use for semantic search.",
     )
-    local_model_size: str = Field(
+    model_size: str = Field(
         default="small", title="The size of the local embeddings model used."
     )
     device: Optional[str] = Field(
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 7e20c3409..fbef10766 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -108,13 +108,13 @@ class Embeddings:
                 },
             )
 
-            if self.config.semantic_search.local_model == SemanticSearchModelEnum.jinav2:
+            if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
                 # Single JinaV2Embedding instance for both text and vision
                 self.embedding = JinaV2Embedding(
-                    model_size=self.config.semantic_search.local_model_size,
+                    model_size=self.config.semantic_search.model_size,
                     requestor=self.requestor,
                     device=config.semantic_search.device
-                    or ("GPU" if config.semantic_search.local_model_size == "large" else "CPU"),
+                    or ("GPU" if config.semantic_search.model_size == "large" else "CPU"),
                 )
                 self.text_embedding = lambda input_data: self.embedding(
                     input_data, embedding_type="text"
@@ -124,15 +124,15 @@ class Embeddings:
                 )
             else:  # Default to jinav1
                 self.text_embedding = JinaV1TextEmbedding(
-                    model_size=config.semantic_search.local_model_size,
+                    model_size=config.semantic_search.model_size,
                     requestor=self.requestor,
                     device="CPU",
                 )
                 self.vision_embedding = JinaV1ImageEmbedding(
-                    model_size=config.semantic_search.local_model_size,
+                    model_size=config.semantic_search.model_size,
                     requestor=self.requestor,
                     device=config.semantic_search.device
-                    or ("GPU" if config.semantic_search.local_model_size == "large" else "CPU"),
+                    or ("GPU" if config.semantic_search.model_size == "large" else "CPU"),
                 )
         else:
             self.remote_embedding_client = get_embedding_client(self.config)
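
For reference, here is what a user config looks like once the renamed options are in place. This is a minimal sketch assembled from the hunks above — the combination of values and the inline comments are illustrative, not lifted from the docs verbatim. Note that an explicitly configured `device` always wins over the size-based default, since the embeddings code falls back with `config.semantic_search.device or ("GPU" if model_size == "large" else "CPU")`:

```yaml
semantic_search:
  enabled: True
  # Previously `local_model`: selects the local CLIP model (jinav1 or jinav2).
  model: "jinav2"
  # Previously `local_model_size`: the `large` variant runs on the GPU by default.
  model_size: large
  # Optional: pins the 'large' model to a specific GPU in multi-GPU installations;
  # omit it to accept the size-based default above.
  device: 0
```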