Compare commits

..

No commits in common. "4cdea4b93de51f7a9c14fdf955e3539589865d0e" and "6aafa0c6f9b223b3e1f8aea4b874f15fc836f35d" have entirely different histories.

8 changed files with 5 additions and 24 deletions

View File

@@ -54,8 +54,8 @@ function setup_homekit_config() {
     local config_path="$1"
     if [[ ! -f "${config_path}" ]]; then
-        echo "[INFO] Creating empty config file for HomeKit..."
-        echo '{}' > "${config_path}"
+        echo "[INFO] Creating empty HomeKit config file..."
+        echo 'homekit: {}' > "${config_path}"
     fi
     # Convert YAML to JSON for jq processing

View File

@@ -696,9 +696,6 @@ genai:
   # Optional additional args to pass to the GenAI Provider (default: None)
   provider_options:
     keep_alive: -1
-  # Optional: Options to pass during inference calls (default: {})
-  runtime_options:
-    temperature: 0.7
   # Optional: Configuration for audio transcription
   # NOTE: only the enabled option can be overridden at the camera level

View File

@@ -26,6 +26,3 @@ class GenAIConfig(FrigateBaseModel):
     provider_options: dict[str, Any] = Field(
         default={}, title="GenAI Provider extra options."
     )
-    runtime_options: dict[str, Any] = Field(
-        default={}, title="Options to pass during inference calls."
-    )

View File

@@ -64,7 +64,6 @@ class OpenAIClient(GenAIClient):
                     },
                 ],
                 timeout=self.timeout,
-                **self.genai_config.runtime_options,
             )
         except Exception as e:
             logger.warning("Azure OpenAI returned an error: %s", str(e))

View File

@@ -35,14 +35,10 @@ class GeminiClient(GenAIClient):
             for img in images
         ] + [prompt]
         try:
-            # Merge runtime_options into generation_config if provided
-            generation_config_dict = {"candidate_count": 1}
-            generation_config_dict.update(self.genai_config.runtime_options)
             response = self.provider.generate_content(
                 data,
                 generation_config=genai.types.GenerationConfig(
-                    **generation_config_dict
+                    candidate_count=1,
                 ),
                 request_options=genai.types.RequestOptions(
                     timeout=self.timeout,

View File

@@ -58,15 +58,11 @@ class OllamaClient(GenAIClient):
             )
             return None
         try:
-            ollama_options = {
-                **self.provider_options,
-                **self.genai_config.runtime_options,
-            }
             result = self.provider.generate(
                 self.genai_config.model,
                 prompt,
                 images=images if images else None,
-                **ollama_options,
+                **self.provider_options,
             )
             logger.debug(
                 f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"

View File

@@ -61,7 +61,6 @@ class OpenAIClient(GenAIClient):
                 },
             ],
             timeout=self.timeout,
-            **self.genai_config.runtime_options,
         )
         if (
             result is not None

View File

@@ -15,9 +15,6 @@
     },
     "provider_options": {
       "label": "GenAI Provider extra options."
-    },
-    "runtime_options": {
-      "label": "Options to pass during inference calls."
     }
   }
 }