Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-01-22 20:18:30 +03:00)
Miscellaneous fixes (0.17 beta) (#21607)
Some checks are pending
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
* Strip model name before training
* Handle options file for go2rtc option
* Make reviewed optional and add null to API call
* Send reviewed for dashboard
* Allow setting context size for openai compatible endpoints
* Push empty go2rtc config to avoid homekit error in log
* Add option to set runtime options for LLM providers
* Docs

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Parent: 91cc6747b6
Commit: 2c34e1ec10
@@ -54,8 +54,8 @@ function setup_homekit_config() {
     local config_path="$1"
 
     if [[ ! -f "${config_path}" ]]; then
-        echo "[INFO] Creating empty HomeKit config file..."
-        echo 'homekit: {}' > "${config_path}"
+        echo "[INFO] Creating empty config file for HomeKit..."
+        echo '{}' > "${config_path}"
     fi
 
     # Convert YAML to JSON for jq processing
@@ -23,8 +23,28 @@ sys.path.remove("/opt/frigate")
 yaml = YAML()
 
 # Check if arbitrary exec sources are allowed (defaults to False for security)
-ALLOW_ARBITRARY_EXEC = os.environ.get(
-    "GO2RTC_ALLOW_ARBITRARY_EXEC", "false"
+allow_arbitrary_exec = None
+if "GO2RTC_ALLOW_ARBITRARY_EXEC" in os.environ:
+    allow_arbitrary_exec = os.environ.get("GO2RTC_ALLOW_ARBITRARY_EXEC")
+elif (
+    os.path.isdir("/run/secrets")
+    and os.access("/run/secrets", os.R_OK)
+    and "GO2RTC_ALLOW_ARBITRARY_EXEC" in os.listdir("/run/secrets")
+):
+    allow_arbitrary_exec = (
+        Path(os.path.join("/run/secrets", "GO2RTC_ALLOW_ARBITRARY_EXEC"))
+        .read_text()
+        .strip()
+    )
+# check for the add-on options file
+elif os.path.isfile("/data/options.json"):
+    with open("/data/options.json") as f:
+        raw_options = f.read()
+        options = json.loads(raw_options)
+        allow_arbitrary_exec = options.get("go2rtc_allow_arbitrary_exec")
+
+ALLOW_ARBITRARY_EXEC = allow_arbitrary_exec is not None and str(
+    allow_arbitrary_exec
 ).lower() in ("true", "1", "yes")
 
 FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
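In plain terms, the new lookup gives the environment variable priority, then a Docker secret, then the Home Assistant add-on options file; anything missing or non-truthy keeps the secure default of disabled. A condensed, runnable sketch of that order (the secret check is simplified here to a single isfile test, unlike the diff's listdir/access form):

```python
import json
import os
from pathlib import Path


def resolve_allow_arbitrary_exec() -> bool:
    value = None
    # 1. Environment variable wins if present
    if "GO2RTC_ALLOW_ARBITRARY_EXEC" in os.environ:
        value = os.environ["GO2RTC_ALLOW_ARBITRARY_EXEC"]
    # 2. Then a Docker secret mounted under /run/secrets
    elif os.path.isfile("/run/secrets/GO2RTC_ALLOW_ARBITRARY_EXEC"):
        value = Path("/run/secrets/GO2RTC_ALLOW_ARBITRARY_EXEC").read_text().strip()
    # 3. Then the Home Assistant add-on options file
    elif os.path.isfile("/data/options.json"):
        with open("/data/options.json") as f:
            value = json.load(f).get("go2rtc_allow_arbitrary_exec")
    # Missing or non-truthy values keep the secure default of False
    return value is not None and str(value).lower() in ("true", "1", "yes")
```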
@@ -42,7 +42,7 @@ If you are trying to use a single model for Frigate and HomeAssistant, it will n
 The following models are recommended:
 
 | Model         | Notes                                                                 |
-| ----------------- | -------------------------------------------------------------------- |
+| ------------- | -------------------------------------------------------------------- |
 | `qwen3-vl`    | Strong visual and situational understanding, higher vram requirement |
 | `Intern3.5VL` | Relatively fast with good vision comprehension                        |
 | `gemma3`      | Strong frame-to-frame understanding, slower inference times          |
@@ -120,6 +120,23 @@ To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` env
 
 :::
 
+:::tip
+
+For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`:
+
+```yaml
+genai:
+  provider: openai
+  base_url: http://your-llama-server
+  model: your-model-name
+  provider_options:
+    context_size: 8192 # Specify the configured context size
+```
+
+This ensures Frigate uses the correct context window size when generating prompts.
+
+:::
+
 ## Azure OpenAI
 
 Microsoft offers several vision models through Azure OpenAI. A subscription is required.
@@ -696,6 +696,9 @@ genai:
   # Optional additional args to pass to the GenAI Provider (default: None)
   provider_options:
     keep_alive: -1
+  # Optional: Options to pass during inference calls (default: {})
+  runtime_options:
+    temperature: 0.7
 
   # Optional: Configuration for audio transcription
   # NOTE: only the enabled option can be overridden at the camera level
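The split between the two maps: `provider_options` are applied once, when the provider client is constructed, while the new `runtime_options` are replayed on every inference call. A minimal sketch of that flow under those assumptions (`FakeProvider` and `SketchClient` are hypothetical stand-ins, not Frigate classes):

```python
from typing import Any


class FakeProvider:
    """Stand-in for a GenAI SDK client (hypothetical, for illustration)."""

    def __init__(self, **provider_options: Any) -> None:
        self.provider_options = provider_options

    def generate(self, prompt: str, **runtime_options: Any) -> str:
        return f"prompt={prompt!r} options={runtime_options}"


class SketchClient:
    def __init__(self, provider_options: dict[str, Any], runtime_options: dict[str, Any]) -> None:
        # provider_options configure the client once, at construction time
        self.provider = FakeProvider(**provider_options)
        # runtime_options ride along on every inference call
        self.runtime_options = runtime_options

    def describe(self, prompt: str) -> str:
        return self.provider.generate(prompt, **self.runtime_options)


print(SketchClient({"keep_alive": -1}, {"temperature": 0.7}).describe("what is in this image?"))
```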
@@ -10,7 +10,7 @@ class ReviewQueryParams(BaseModel):
     cameras: str = "all"
     labels: str = "all"
     zones: str = "all"
-    reviewed: int = 0
+    reviewed: Union[int, SkipJsonSchema[None]] = None
     limit: Union[int, SkipJsonSchema[None]] = None
     severity: Union[SeverityEnum, SkipJsonSchema[None]] = None
     before: Union[float, SkipJsonSchema[None]] = None
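With `reviewed` now defaulting to `None`, callers can distinguish "only unreviewed" (0), "only reviewed" (1), and "no filter at all" (omitted). A small sketch of that three-state semantics (the in-memory filter and field name below are illustrative, not Frigate's actual query code):

```python
from typing import Optional


def apply_reviewed_filter(items: list[dict], reviewed: Optional[int]) -> list[dict]:
    if reviewed is None:
        # None now means "no filter": return reviewed and unreviewed items alike
        return items
    return [item for item in items if item["has_been_reviewed"] == reviewed]


items = [{"id": 1, "has_been_reviewed": 0}, {"id": 2, "has_been_reviewed": 1}]
assert len(apply_reviewed_filter(items, None)) == 2  # both
assert len(apply_reviewed_filter(items, 0)) == 1     # unreviewed only
```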
@@ -26,3 +26,6 @@ class GenAIConfig(FrigateBaseModel):
     provider_options: dict[str, Any] = Field(
         default={}, title="GenAI Provider extra options."
     )
+    runtime_options: dict[str, Any] = Field(
+        default={}, title="Options to pass during inference calls."
+    )
@@ -64,6 +64,7 @@ class OpenAIClient(GenAIClient):
                     },
                 ],
                 timeout=self.timeout,
+                **self.genai_config.runtime_options,
             )
         except Exception as e:
             logger.warning("Azure OpenAI returned an error: %s", str(e))
@@ -35,10 +35,14 @@ class GeminiClient(GenAIClient):
             for img in images
         ] + [prompt]
         try:
+            # Merge runtime_options into generation_config if provided
+            generation_config_dict = {"candidate_count": 1}
+            generation_config_dict.update(self.genai_config.runtime_options)
+
             response = self.provider.generate_content(
                 data,
                 generation_config=genai.types.GenerationConfig(
-                    candidate_count=1,
+                    **generation_config_dict
                 ),
                 request_options=genai.types.RequestOptions(
                     timeout=self.timeout,
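Because the merge starts from the defaults dict and applies `runtime_options` second, user-supplied keys are added, and `dict.update` even lets them override the `candidate_count` default:

```python
# dict.update lets the later (user-supplied) keys win on collision
generation_config_dict = {"candidate_count": 1}
generation_config_dict.update({"temperature": 0.7, "candidate_count": 2})
print(generation_config_dict)  # {'candidate_count': 2, 'temperature': 0.7}
```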
@@ -58,11 +58,15 @@ class OllamaClient(GenAIClient):
             )
             return None
         try:
+            ollama_options = {
+                **self.provider_options,
+                **self.genai_config.runtime_options,
+            }
             result = self.provider.generate(
                 self.genai_config.model,
                 prompt,
                 images=images if images else None,
-                **self.provider_options,
+                **ollama_options,
             )
             logger.debug(
                 f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
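The same precedence applies for Ollama: `runtime_options` is unpacked last, so it wins over `provider_options` when both set the same key (the specific option names below are for illustration):

```python
provider_options = {"keep_alive": -1, "num_ctx": 4096}
runtime_options = {"num_ctx": 8192, "temperature": 0.5}
# later unpacking wins on duplicate keys
ollama_options = {**provider_options, **runtime_options}
print(ollama_options)  # {'keep_alive': -1, 'num_ctx': 8192, 'temperature': 0.5}
```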
@@ -22,9 +22,14 @@ class OpenAIClient(GenAIClient):
 
     def _init_provider(self):
         """Initialize the client."""
-        return OpenAI(
-            api_key=self.genai_config.api_key, **self.genai_config.provider_options
-        )
+        # Extract context_size from provider_options as it's not a valid OpenAI client parameter
+        # It will be used in get_context_size() instead
+        provider_opts = {
+            k: v
+            for k, v in self.genai_config.provider_options.items()
+            if k != "context_size"
+        }
+        return OpenAI(api_key=self.genai_config.api_key, **provider_opts)
 
     def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
         """Submit a request to OpenAI."""
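The dict comprehension simply drops the Frigate-level `context_size` hint before the remaining options are forwarded to the OpenAI constructor. A quick illustration (`timeout` stands in for any real client kwarg):

```python
provider_options = {"timeout": 30, "context_size": 8192}
provider_opts = {k: v for k, v in provider_options.items() if k != "context_size"}
print(provider_opts)  # {'timeout': 30}
```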
@@ -56,6 +61,7 @@ class OpenAIClient(GenAIClient):
                     },
                 ],
                 timeout=self.timeout,
+                **self.genai_config.runtime_options,
             )
             if (
                 result is not None
@@ -73,6 +79,16 @@ class OpenAIClient(GenAIClient):
         if self.context_size is not None:
             return self.context_size
 
+        # First check provider_options for manually specified context size
+        # This is necessary for llama.cpp and other OpenAI-compatible servers
+        # that don't expose the configured runtime context size in the API response
+        if "context_size" in self.genai_config.provider_options:
+            self.context_size = self.genai_config.provider_options["context_size"]
+            logger.debug(
+                f"Using context size {self.context_size} from provider_options for model {self.genai_config.model}"
+            )
+            return self.context_size
+
         try:
             models = self.provider.models.list()
             for model in models.data:
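Putting it together, the lookup order is now: cached value, then a manual `context_size` from `provider_options`, then whatever the models API reports. A simplified sketch of that order (the fallback default here is hypothetical, and the real method parses the /models response rather than taking it as a plain argument):

```python
from typing import Optional


def resolve_context_size(
    cached: Optional[int],
    provider_options: dict,
    api_reported: Optional[int],
    default: int = 4096,  # hypothetical fallback, for illustration only
) -> int:
    if cached is not None:
        return cached
    if "context_size" in provider_options:
        # manual override, e.g. for llama.cpp servers
        return provider_options["context_size"]
    if api_reported is not None:
        return api_reported
    return default


assert resolve_context_size(None, {"context_size": 8192}, None) == 8192
assert resolve_context_size(None, {}, 32768) == 32768
```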
@@ -43,6 +43,7 @@ def write_training_metadata(model_name: str, image_count: int) -> None:
         model_name: Name of the classification model
         image_count: Number of images used in training
     """
+    model_name = model_name.strip()
     clips_model_dir = os.path.join(CLIPS_DIR, model_name)
     os.makedirs(clips_model_dir, exist_ok=True)
 
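The `strip()` matters because the model name becomes part of an on-disk path; a stray trailing space would otherwise silently create a second directory. The same guard is repeated in each entry point below. A quick demonstration (the CLIPS_DIR value is a placeholder):

```python
import os

CLIPS_DIR = "/media/frigate/clips"  # placeholder value for illustration
print(os.path.join(CLIPS_DIR, "bird "))          # '/media/frigate/clips/bird ' (trailing space)
print(os.path.join(CLIPS_DIR, "bird ".strip()))  # '/media/frigate/clips/bird'
```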
@@ -70,6 +71,7 @@ def read_training_metadata(model_name: str) -> dict[str, any] | None:
     Returns:
         Dictionary with last_training_date and last_training_image_count, or None if not found
     """
+    model_name = model_name.strip()
     clips_model_dir = os.path.join(CLIPS_DIR, model_name)
     metadata_path = os.path.join(clips_model_dir, TRAINING_METADATA_FILE)
 
@@ -95,6 +97,7 @@ def get_dataset_image_count(model_name: str) -> int:
     Returns:
         Total count of images across all categories
     """
+    model_name = model_name.strip()
     dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
 
     if not os.path.exists(dataset_dir):
@@ -126,6 +129,7 @@ class ClassificationTrainingProcess(FrigateProcess):
             "TF_KERAS_MOBILENET_V2_WEIGHTS_URL",
             "",
         )
+        model_name = model_name.strip()
         super().__init__(
             stop_event=None,
             priority=PROCESS_PRIORITY_LOW,
@@ -292,6 +296,7 @@ class ClassificationTrainingProcess(FrigateProcess):
 def kickoff_model_training(
     embeddingRequestor: EmbeddingsRequestor, model_name: str
 ) -> None:
+    model_name = model_name.strip()
     requestor = InterProcessRequestor()
     requestor.send_data(
         UPDATE_MODEL_STATE,
@@ -359,6 +364,7 @@ def collect_state_classification_examples(
         model_name: Name of the classification model
         cameras: Dict mapping camera names to normalized crop coordinates [x1, y1, x2, y2] (0-1)
     """
+    model_name = model_name.strip()
     dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
 
     # Step 1: Get review items for the cameras
@@ -714,6 +720,7 @@ def collect_object_classification_examples(
         model_name: Name of the classification model
         label: Object label to collect (e.g., "person", "car")
     """
+    model_name = model_name.strip()
     dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
     temp_dir = os.path.join(dataset_dir, "temp")
     os.makedirs(temp_dir, exist_ok=True)
@@ -15,6 +15,9 @@
     },
     "provider_options": {
       "label": "GenAI Provider extra options."
+    },
+    "runtime_options": {
+      "label": "Options to pass during inference calls."
     }
   }
 }
@@ -205,7 +205,7 @@ export default function Events() {
       cameras: reviewSearchParams["cameras"],
       labels: reviewSearchParams["labels"],
       zones: reviewSearchParams["zones"],
-      reviewed: 1,
+      reviewed: null, // We want both reviewed and unreviewed items as we filter in the UI
       before: reviewSearchParams["before"] || last24Hours.before,
       after: reviewSearchParams["after"] || last24Hours.after,
     };
@@ -114,6 +114,7 @@ export default function LiveDashboardView({
     {
       limit: 10,
      severity: "alert",
+      reviewed: 0,
       cameras: alertCameras,
     },
   ]);