Miscellaneous fixes (0.17 beta) (#21655)

* Fix jetson stats reading

* Return result

* Avoid unknown class for cover image

* fix double encoding of passwords in camera wizard

* formatting

* empty homekit config fixes

* add locks to jina v1 embeddings

Protect the tokenizer and feature extractor in jina_v1_embedding with a per-instance thread lock to avoid the "Already borrowed" RuntimeError during concurrent tokenization
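
A minimal sketch of the locking pattern; the class name, constructor arguments, and call details below are illustrative rather than the exact Frigate code (the real classes guard both a tokenizer and a feature extractor):

import threading

from transformers import AutoTokenizer


class TextEmbedding:
    def __init__(self, model_dir: str) -> None:
        # placeholder: the real code downloads and loads model files itself
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
        # Rust-backed fast tokenizers are not safe for concurrent use;
        # two threads tokenizing at once can raise "Already borrowed".
        self._lock = threading.Lock()

    def _preprocess_inputs(self, raw_inputs: list[str]):
        # Serialize all tokenizer access on this instance.
        with self._lock:
            return [self.tokenizer(text, return_tensors="np") for text in raw_inputs]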

* Capitalize correctly

* replace deprecated google-generativeai with google-genai

update the Gemini provider to use the new google-genai SDK calls
provider_options now supplies any HTTP options for the client
suppress unneeded info logging from the SDK
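
A minimal sketch of the new google-genai call shape; the API key, model name, image path, and timeout are placeholder values, while the real provider builds them from the Frigate genai config:

from google import genai
from google.genai import types

client = genai.Client(
    api_key="YOUR_API_KEY",  # placeholder
    http_options=types.HttpOptions(
        api_version="v1",
        timeout=60 * 1000,  # the new SDK expects milliseconds
    ),
)

with open("frame.jpg", "rb") as f:  # placeholder image
    image_bytes = f.read()

response = client.models.generate_content(
    model="gemini-2.5-flash",  # placeholder model name
    contents=[
        types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"),
        "Describe what is happening in this image.",
    ],
    config=types.GenerateContentConfig(candidate_count=1),
)
print(response.text)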

* fix attribute area on detail stream hover

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Nicolas Mowen 2026-01-15 07:08:49 -07:00 committed by GitHub
parent 2e1706baa0
commit bf099c3edd
11 changed files with 83 additions and 58 deletions

View File

@@ -47,7 +47,7 @@ onnxruntime == 1.22.*
 # Embeddings
 transformers == 4.45.*
 # Generative AI
-google-generativeai == 0.8.*
+google-genai == 1.58.*
 ollama == 0.6.*
 openai == 1.65.*
 # push notifications

View File

@@ -69,15 +69,15 @@ function setup_homekit_config() {
     local cleaned_json="/tmp/cache/homekit_cleaned.json"
     jq '
         # Keep only the homekit section if it exists, otherwise empty object
-        if has("homekit") then {homekit: .homekit} else {homekit: {}} end
+        if has("homekit") then {homekit: .homekit} else {} end
     ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || {
-        echo '{"homekit": {}}' > "${cleaned_json}"
+        echo '{}' > "${cleaned_json}"
     }
     # Convert back to YAML and write to the config file
     yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
         echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
-        echo 'homekit: {}' > "${config_path}"
+        echo '{}' > "${config_path}"
     }
     # Clean up temp files

View File

@@ -848,9 +848,10 @@ async def onvif_probe(
             try:
                 if isinstance(uri, str) and uri.startswith("rtsp://"):
                     if username and password and "@" not in uri:
-                        # Inject URL-encoded credentials and add only the
-                        # authenticated version.
-                        cred = f"{quote_plus(username)}:{quote_plus(password)}@"
+                        # Inject raw credentials and add only the
+                        # authenticated version. The credentials will be encoded
+                        # later by ffprobe_stream or the config system.
+                        cred = f"{username}:{password}@"
                         injected = uri.replace(
                             "rtsp://", f"rtsp://{cred}", 1
                         )
@@ -903,12 +904,8 @@ async def onvif_probe(
            "/cam/realmonitor?channel=1&subtype=0",
            "/11",
        ]
-        # Use URL-encoded credentials for pattern fallback URIs when provided
-        auth_str = (
-            f"{quote_plus(username)}:{quote_plus(password)}@"
-            if username and password
-            else ""
-        )
+        # Use raw credentials for pattern fallback URIs when provided
+        auth_str = f"{username}:{password}@" if username and password else ""
         rtsp_port = 554
         for path in common_paths:
             uri = f"rtsp://{auth_str}{host}:{rtsp_port}{path}"
@@ -930,7 +927,7 @@ async def onvif_probe(
                     and uri.startswith("rtsp://")
                     and "@" not in uri
                 ):
-                    cred = f"{quote_plus(username)}:{quote_plus(password)}@"
+                    cred = f"{username}:{password}@"
                     cred_uri = uri.replace("rtsp://", f"rtsp://{cred}", 1)
                     if cred_uri not in to_test:
                         to_test.append(cred_uri)

View File

@@ -2,6 +2,7 @@
 import logging
 import os
+import threading
 import warnings
 from transformers import AutoFeatureExtractor, AutoTokenizer
@@ -54,6 +55,7 @@ class JinaV1TextEmbedding(BaseEmbedding):
         self.tokenizer = None
         self.feature_extractor = None
         self.runner = None
+        self._lock = threading.Lock()
         files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
         if not all(
@@ -134,6 +136,7 @@ class JinaV1TextEmbedding(BaseEmbedding):
         )
     def _preprocess_inputs(self, raw_inputs):
+        with self._lock:
             max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
             return [
                 self.tokenizer(
@@ -174,6 +177,7 @@ class JinaV1ImageEmbedding(BaseEmbedding):
         self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
         self.feature_extractor = None
         self.runner: BaseModelRunner | None = None
+        self._lock = threading.Lock()
         files_names = list(self.download_urls.keys())
         if not all(
             os.path.exists(os.path.join(self.download_path, n)) for n in files_names
@@ -216,6 +220,7 @@ class JinaV1ImageEmbedding(BaseEmbedding):
         )
     def _preprocess_inputs(self, raw_inputs):
+        with self._lock:
             processed_images = [self._process_image(img) for img in raw_inputs]
             return [
                 self.feature_extractor(images=image, return_tensors="np")

View File

@@ -3,8 +3,8 @@
 import logging
 from typing import Optional
-import google.generativeai as genai
-from google.api_core.exceptions import GoogleAPICallError
+from google import genai
+from google.genai import errors, types
 from frigate.config import GenAIProviderEnum
 from frigate.genai import GenAIClient, register_genai_provider
@@ -16,44 +16,51 @@ logger = logging.getLogger(__name__)
 class GeminiClient(GenAIClient):
     """Generative AI client for Frigate using Gemini."""
-    provider: genai.GenerativeModel
+    provider: genai.Client
     def _init_provider(self):
         """Initialize the client."""
-        genai.configure(api_key=self.genai_config.api_key)
-        return genai.GenerativeModel(
-            self.genai_config.model, **self.genai_config.provider_options
+        # Merge provider_options into HttpOptions
+        http_options_dict = {
+            "api_version": "v1",
+            "timeout": int(self.timeout * 1000),  # requires milliseconds
+        }
+        if isinstance(self.genai_config.provider_options, dict):
+            http_options_dict.update(self.genai_config.provider_options)
+        return genai.Client(
+            api_key=self.genai_config.api_key,
+            http_options=types.HttpOptions(**http_options_dict),
         )
     def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
         """Submit a request to Gemini."""
-        data = [
-            {
-                "mime_type": "image/jpeg",
-                "data": img,
-            }
-            for img in images
+        contents = [
+            types.Part.from_bytes(data=img, mime_type="image/jpeg") for img in images
         ] + [prompt]
         try:
             # Merge runtime_options into generation_config if provided
             generation_config_dict = {"candidate_count": 1}
             generation_config_dict.update(self.genai_config.runtime_options)
-            response = self.provider.generate_content(
-                data,
-                generation_config=genai.types.GenerationConfig(
-                    **generation_config_dict
-                ),
-                request_options=genai.types.RequestOptions(
-                    timeout=self.timeout,
+            response = self.provider.models.generate_content(
+                model=self.genai_config.model,
+                contents=contents,
+                config=types.GenerateContentConfig(
+                    **generation_config_dict,
                 ),
             )
-        except GoogleAPICallError as e:
+        except errors.APIError as e:
             logger.warning("Gemini returned an error: %s", str(e))
             return None
         except Exception as e:
             logger.warning("An unexpected error occurred with Gemini: %s", str(e))
             return None
         try:
             description = response.text.strip()
-        except ValueError:
+        except (ValueError, AttributeError):
             # No description was generated
             return None
         return description

View File

@@ -89,6 +89,7 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
         "ws4py": LogLevel.error,
         "PIL": LogLevel.warning,
         "numba": LogLevel.warning,
+        "google_genai.models": LogLevel.warning,
         **log_levels,
     }

View File

@@ -540,9 +540,16 @@ def get_jetson_stats() -> Optional[dict[int, dict]]:
     try:
         results["mem"] = "-"  # no discrete gpu memory
+        if os.path.exists("/sys/devices/gpu.0/load"):
             with open("/sys/devices/gpu.0/load", "r") as f:
                 gpuload = float(f.readline()) / 10
                 results["gpu"] = f"{gpuload}%"
+        elif os.path.exists("/sys/devices/platform/gpu.0/load"):
+            with open("/sys/devices/platform/gpu.0/load", "r") as f:
+                gpuload = float(f.readline()) / 10
+                results["gpu"] = f"{gpuload}%"
+        else:
+            results["gpu"] = "-"
     except Exception:
         return None

View File

@@ -887,7 +887,10 @@ function LifecycleItem({
          </span>
          <span className="font-medium text-foreground">
            {attributeAreaPx}{" "}
-            {t("information.pixels", { ns: "common" })}{" "}
+            {t("information.pixels", {
+              ns: "common",
+              area: attributeAreaPx,
+            })}{" "}
            <span className="text-secondary-foreground">·</span>{" "}
            {attributeAreaPct}%
          </span>

View File

@@ -81,7 +81,8 @@ export async function detectReolinkCamera(
 export function maskUri(uri: string): string {
   try {
     // Handle RTSP URLs with user:pass@host format
-    const rtspMatch = uri.match(/rtsp:\/\/([^:]+):([^@]+)@(.+)/);
+    // Use greedy match for password to handle passwords with @
+    const rtspMatch = uri.match(/rtsp:\/\/([^:]+):(.+)@(.+)/);
     if (rtspMatch) {
       return `rtsp://${rtspMatch[1]}:${"*".repeat(4)}@${rtspMatch[3]}`;
     }

View File

@@ -266,7 +266,10 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
      return undefined;
    }
-    const keys = Object.keys(dataset.categories).filter((key) => key != "none");
+    const keys = Object.keys(dataset.categories).filter(
+      (key) => key != "none" && key.toLowerCase() != "unknown",
+    );
    if (keys.length === 0) {
      return undefined;
    }

View File

@@ -75,6 +75,7 @@ import SearchDetailDialog, {
 } from "@/components/overlay/detail/SearchDetailDialog";
 import { SearchResult } from "@/types/search";
 import { HiSparkles } from "react-icons/hi";
+import { capitalizeFirstLetter } from "@/utils/stringUtil";
 type ModelTrainingViewProps = {
   model: CustomClassificationModelConfig;
@@ -88,7 +89,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
   // title
   useEffect(() => {
-    document.title = `${model.name.toUpperCase()} - ${t("documentTitle")}`;
+    document.title = `${capitalizeFirstLetter(model.name)} - ${t("documentTitle")}`;
   }, [model.name, t]);
   // model state