Compare commits

..

No commits in common. "13209090f23ff1c55e265b6645c73036cc91b2e7" and "7bc2ef731b3d145d6cf0427ea4e2dd6ca837a3d4" have entirely different histories.

6 changed files with 40 additions and 57 deletions

View File

@@ -47,7 +47,7 @@ onnxruntime == 1.22.*
# Embeddings # Embeddings
transformers == 4.45.* transformers == 4.45.*
# Generative AI # Generative AI
google-genai == 1.58.* google-generativeai == 0.8.*
ollama == 0.6.* ollama == 0.6.*
openai == 1.65.* openai == 1.65.*
# push notifications # push notifications

View File

@@ -2,7 +2,6 @@
import logging import logging
import os import os
import threading
import warnings import warnings
from transformers import AutoFeatureExtractor, AutoTokenizer from transformers import AutoFeatureExtractor, AutoTokenizer
@@ -55,7 +54,6 @@ class JinaV1TextEmbedding(BaseEmbedding):
self.tokenizer = None self.tokenizer = None
self.feature_extractor = None self.feature_extractor = None
self.runner = None self.runner = None
self._lock = threading.Lock()
files_names = list(self.download_urls.keys()) + [self.tokenizer_file] files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
if not all( if not all(
@@ -136,18 +134,17 @@ class JinaV1TextEmbedding(BaseEmbedding):
) )
def _preprocess_inputs(self, raw_inputs): def _preprocess_inputs(self, raw_inputs):
with self._lock: max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs) return [
return [ self.tokenizer(
self.tokenizer( text,
text, padding="max_length",
padding="max_length", truncation=True,
truncation=True, max_length=max_length,
max_length=max_length, return_tensors="np",
return_tensors="np", )
) for text in raw_inputs
for text in raw_inputs ]
]
class JinaV1ImageEmbedding(BaseEmbedding): class JinaV1ImageEmbedding(BaseEmbedding):
@@ -177,7 +174,6 @@ class JinaV1ImageEmbedding(BaseEmbedding):
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.feature_extractor = None self.feature_extractor = None
self.runner: BaseModelRunner | None = None self.runner: BaseModelRunner | None = None
self._lock = threading.Lock()
files_names = list(self.download_urls.keys()) files_names = list(self.download_urls.keys())
if not all( if not all(
os.path.exists(os.path.join(self.download_path, n)) for n in files_names os.path.exists(os.path.join(self.download_path, n)) for n in files_names
@@ -220,9 +216,8 @@
) )
def _preprocess_inputs(self, raw_inputs): def _preprocess_inputs(self, raw_inputs):
with self._lock: processed_images = [self._process_image(img) for img in raw_inputs]
processed_images = [self._process_image(img) for img in raw_inputs] return [
return [ self.feature_extractor(images=image, return_tensors="np")
self.feature_extractor(images=image, return_tensors="np") for image in processed_images
for image in processed_images ]
]

View File

@@ -3,8 +3,8 @@
import logging import logging
from typing import Optional from typing import Optional
from google import genai import google.generativeai as genai
from google.genai import errors, types from google.api_core.exceptions import GoogleAPICallError
from frigate.config import GenAIProviderEnum from frigate.config import GenAIProviderEnum
from frigate.genai import GenAIClient, register_genai_provider from frigate.genai import GenAIClient, register_genai_provider
@@ -16,51 +16,44 @@ logger = logging.getLogger(__name__)
class GeminiClient(GenAIClient): class GeminiClient(GenAIClient):
"""Generative AI client for Frigate using Gemini.""" """Generative AI client for Frigate using Gemini."""
provider: genai.Client provider: genai.GenerativeModel
def _init_provider(self): def _init_provider(self):
"""Initialize the client.""" """Initialize the client."""
# Merge provider_options into HttpOptions genai.configure(api_key=self.genai_config.api_key)
http_options_dict = { return genai.GenerativeModel(
"api_version": "v1", self.genai_config.model, **self.genai_config.provider_options
"timeout": int(self.timeout * 1000), # requires milliseconds
}
if isinstance(self.genai_config.provider_options, dict):
http_options_dict.update(self.genai_config.provider_options)
return genai.Client(
api_key=self.genai_config.api_key,
http_options=types.HttpOptions(**http_options_dict),
) )
def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
"""Submit a request to Gemini.""" """Submit a request to Gemini."""
contents = [ data = [
types.Part.from_bytes(data=img, mime_type="image/jpeg") for img in images {
"mime_type": "image/jpeg",
"data": img,
}
for img in images
] + [prompt] ] + [prompt]
try: try:
# Merge runtime_options into generation_config if provided # Merge runtime_options into generation_config if provided
generation_config_dict = {"candidate_count": 1} generation_config_dict = {"candidate_count": 1}
generation_config_dict.update(self.genai_config.runtime_options) generation_config_dict.update(self.genai_config.runtime_options)
response = self.provider.models.generate_content( response = self.provider.generate_content(
model=self.genai_config.model, data,
contents=contents, generation_config=genai.types.GenerationConfig(
config=types.GenerateContentConfig( **generation_config_dict
**generation_config_dict, ),
request_options=genai.types.RequestOptions(
timeout=self.timeout,
), ),
) )
except errors.APIError as e: except GoogleAPICallError as e:
logger.warning("Gemini returned an error: %s", str(e)) logger.warning("Gemini returned an error: %s", str(e))
return None return None
except Exception as e:
logger.warning("An unexpected error occurred with Gemini: %s", str(e))
return None
try: try:
description = response.text.strip() description = response.text.strip()
except (ValueError, AttributeError): except ValueError:
# No description was generated # No description was generated
return None return None
return description return description

View File

@@ -89,7 +89,6 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
"ws4py": LogLevel.error, "ws4py": LogLevel.error,
"PIL": LogLevel.warning, "PIL": LogLevel.warning,
"numba": LogLevel.warning, "numba": LogLevel.warning,
"google_genai.models": LogLevel.warning,
**log_levels, **log_levels,
} }

View File

@@ -887,10 +887,7 @@ function LifecycleItem({
</span> </span>
<span className="font-medium text-foreground"> <span className="font-medium text-foreground">
{attributeAreaPx}{" "} {attributeAreaPx}{" "}
{t("information.pixels", { {t("information.pixels", { ns: "common" })}{" "}
ns: "common",
area: attributeAreaPx,
})}{" "}
<span className="text-secondary-foreground">·</span>{" "} <span className="text-secondary-foreground">·</span>{" "}
{attributeAreaPct}% {attributeAreaPct}%
</span> </span>

View File

@@ -75,7 +75,6 @@ import SearchDetailDialog, {
} from "@/components/overlay/detail/SearchDetailDialog"; } from "@/components/overlay/detail/SearchDetailDialog";
import { SearchResult } from "@/types/search"; import { SearchResult } from "@/types/search";
import { HiSparkles } from "react-icons/hi"; import { HiSparkles } from "react-icons/hi";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
type ModelTrainingViewProps = { type ModelTrainingViewProps = {
model: CustomClassificationModelConfig; model: CustomClassificationModelConfig;
@@ -89,7 +88,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
// title // title
useEffect(() => { useEffect(() => {
document.title = `${capitalizeFirstLetter(model.name)} - ${t("documentTitle")}`; document.title = `${model.name.toUpperCase()} - ${t("documentTitle")}`;
}, [model.name, t]); }, [model.name, t]);
// model state // model state