From 439312d7653e6bc0e5ba8508b23540f046080b0b Mon Sep 17 00:00:00 2001
From: Jason Hunter
Date: Wed, 12 Jun 2024 21:08:19 -0400
Subject: [PATCH] ask ollama to pull requested model at startup

---
 frigate/genai/ollama.py | 14 ++++++++++++--
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/frigate/genai/ollama.py b/frigate/genai/ollama.py
index ee32830fb..a23acce21 100644
--- a/frigate/genai/ollama.py
+++ b/frigate/genai/ollama.py
@@ -1,5 +1,6 @@
 """Ollama Provider for Frigate AI."""
 
+import logging
 from typing import Optional
 
 from ollama import Client as ApiClient
@@ -8,6 +9,8 @@ from ollama import ResponseError
 from frigate.config import GenAIProviderEnum
 from frigate.genai import GenAIClient, register_genai_provider
 
+logger = logging.getLogger(__name__)
+
 
 @register_genai_provider(GenAIProviderEnum.ollama)
 class OllamaClient(GenAIClient):
@@ -17,7 +20,16 @@ class OllamaClient(GenAIClient):
 
     def _init_provider(self):
         """Initialize the client."""
-        return ApiClient(host=self.genai_config.base_url)
+        client = ApiClient(host=self.genai_config.base_url)
+        try:
+            # Ensure the configured model is available on the Ollama host
+            # before we start sending requests to it.
+            response = client.pull(self.genai_config.model)
+        except ResponseError as e:
+            # Unreachable host or unknown model must not crash startup;
+            # follow the same failure convention as a non-success status.
+            logger.error("Failed to pull %s model from Ollama: %s", self.genai_config.model, e)
+            return None
+        if response["status"] != "success":
+            logger.error("Failed to pull %s model from Ollama", self.genai_config.model)
+            return None
+        return client
 
     def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
         """Submit a request to Ollama."""