Mirror of https://github.com/blakeblackshear/frigate.git, synced 2026-01-23 04:28:32 +03:00
* Fix Jetson stats reading
* Return result
* Avoid unknown class for cover image
* Fix double encoding of passwords in camera wizard
* Formatting
* Empty HomeKit config fixes
* Add locks to Jina v1 embeddings: protect the tokenizer and feature extractor in jina_v1_embedding with a per-instance thread lock to avoid the "Already borrowed" RuntimeError during concurrent tokenization (a minimal sketch of this pattern follows below)
* Capitalize correctly
* Replace deprecated google-generativeai with google-genai: update the Gemini GenAI provider with the new SDK calls, let provider_options specify any HTTP options, and suppress unneeded info logging
* Fix attribute area on detail stream hover

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
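A minimal sketch of the per-instance lock pattern referenced above, assuming a HuggingFace fast tokenizer; the class, method names, and model name are illustrative, not Frigate's actual jina_v1_embedding code:

import threading

from transformers import AutoTokenizer


class JinaV1TextEmbedding:
    """Hypothetical embedding wrapper showing the per-instance lock pattern."""

    def __init__(self, model_name: str = "jinaai/jina-clip-v1") -> None:
        # Model name is an assumption for illustration only.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Rust-backed "fast" tokenizers are not safe for concurrent use from
        # multiple threads; sharing one without a lock can raise
        # RuntimeError: Already borrowed.
        self.tokenizer_lock = threading.Lock()

    def tokenize(self, text: str):
        # Serialize access to the shared tokenizer so concurrent callers
        # never touch its internal state at the same time.
        with self.tokenizer_lock:
            return self.tokenizer(text, truncation=True)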
72 lines
2.3 KiB
Python
"""Gemini Provider for Frigate AI."""
|
|
|
|
import logging
|
|
from typing import Optional
|
|
|
|
from google import genai
|
|
from google.genai import errors, types
|
|
|
|
from frigate.config import GenAIProviderEnum
|
|
from frigate.genai import GenAIClient, register_genai_provider
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@register_genai_provider(GenAIProviderEnum.gemini)
|
|
class GeminiClient(GenAIClient):
|
|
"""Generative AI client for Frigate using Gemini."""
|
|
|
|
provider: genai.Client
|
|
|
|
def _init_provider(self):
|
|
"""Initialize the client."""
|
|
# Merge provider_options into HttpOptions
|
|
http_options_dict = {
|
|
"api_version": "v1",
|
|
"timeout": int(self.timeout * 1000), # requires milliseconds
|
|
}
|
|
|
|
if isinstance(self.genai_config.provider_options, dict):
|
|
http_options_dict.update(self.genai_config.provider_options)
|
|
|
|
return genai.Client(
|
|
api_key=self.genai_config.api_key,
|
|
http_options=types.HttpOptions(**http_options_dict),
|
|
)
|
|
|
|
def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
|
|
"""Submit a request to Gemini."""
|
|
contents = [
|
|
types.Part.from_bytes(data=img, mime_type="image/jpeg") for img in images
|
|
] + [prompt]
|
|
try:
|
|
# Merge runtime_options into generation_config if provided
|
|
generation_config_dict = {"candidate_count": 1}
|
|
generation_config_dict.update(self.genai_config.runtime_options)
|
|
|
|
response = self.provider.models.generate_content(
|
|
model=self.genai_config.model,
|
|
contents=contents,
|
|
config=types.GenerateContentConfig(
|
|
**generation_config_dict,
|
|
),
|
|
)
|
|
except errors.APIError as e:
|
|
logger.warning("Gemini returned an error: %s", str(e))
|
|
return None
|
|
except Exception as e:
|
|
logger.warning("An unexpected error occurred with Gemini: %s", str(e))
|
|
return None
|
|
|
|
try:
|
|
description = response.text.strip()
|
|
except (ValueError, AttributeError):
|
|
# No description was generated
|
|
return None
|
|
return description
|
|
|
|
def get_context_size(self) -> int:
|
|
"""Get the context window size for Gemini."""
|
|
# Gemini Pro Vision has a 1M token context window
|
|
return 1000000
|
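For reference, a minimal standalone sketch of the google-genai call pattern the provider above relies on; the API key, model name, prompt, and image path are placeholders rather than values from Frigate's config:

from google import genai
from google.genai import types

# Placeholder credentials and model, purely for illustration.
client = genai.Client(
    api_key="YOUR_API_KEY",
    http_options=types.HttpOptions(api_version="v1", timeout=60_000),  # milliseconds
)

with open("snapshot.jpg", "rb") as f:
    image_bytes = f.read()

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[
        types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"),
        "Describe the activity in this image in one sentence.",
    ],
    config=types.GenerateContentConfig(candidate_count=1),
)
print(response.text)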