Set default values for LLM performance (#20606)

Authored by Nicolas Mowen on 2025-10-21 16:10:00 -06:00, committed by GitHub
parent 21ff257705
commit 007371019a


@@ -1,7 +1,7 @@
 """Ollama Provider for Frigate AI."""
 
 import logging
-from typing import Optional
+from typing import Any, Optional
 
 from httpx import TimeoutException
 from ollama import Client as ApiClient
@@ -17,10 +17,24 @@ logger = logging.getLogger(__name__)
 class OllamaClient(GenAIClient):
     """Generative AI client for Frigate using Ollama."""
 
+    LOCAL_OPTIMIZED_OPTIONS = {
+        "options": {
+            "temperature": 0.5,
+            "repeat_penalty": 1.15,
+            "presence_penalty": 0.1,
+        },
+    }
+
     provider: ApiClient
+    provider_options: dict[str, Any]
 
     def _init_provider(self):
         """Initialize the client."""
+        self.provider_options = {
+            **self.LOCAL_OPTIMIZED_OPTIONS,
+            **self.genai_config.provider_options,
+        }
+
         try:
             client = ApiClient(host=self.genai_config.base_url, timeout=self.timeout)
             # ensure the model is available locally
@@ -48,7 +62,7 @@ class OllamaClient(GenAIClient):
                 self.genai_config.model,
                 prompt,
                 images=images if images else None,
-                **self.genai_config.provider_options,
+                **self.provider_options,
             )
             return result["response"].strip()
         except (TimeoutException, ResponseError) as e:
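For context, the merge built in _init_provider is a shallow dict merge: the class-level defaults come first, so anything the user sets under provider_options in their Frigate config takes precedence, replacing the whole corresponding top-level key. The sketch below illustrates that behavior; the user_provider_options value is a made-up example, not part of this commit.

# Minimal sketch of the merge semantics introduced above (illustrative values only).
LOCAL_OPTIMIZED_OPTIONS = {
    "options": {
        "temperature": 0.5,
        "repeat_penalty": 1.15,
        "presence_penalty": 0.1,
    },
}

# Hypothetical user-supplied provider_options from the Frigate config.
user_provider_options = {"options": {"temperature": 0.7}}

# Same construction as _init_provider: defaults first, user values second.
provider_options = {**LOCAL_OPTIMIZED_OPTIONS, **user_provider_options}

# Because this is a shallow merge, the user's "options" dict replaces the
# default one entirely rather than being merged key by key.
print(provider_options)
# {'options': {'temperature': 0.7}}

When no provider_options are configured, the tuned defaults are used as-is. Either way, the nested "options" dict is expanded into the generate() call via **self.provider_options, where it becomes the options= keyword that the Ollama client uses for sampling parameters.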