diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 6dddfc615..46241c5ab 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -320,6 +320,12 @@ http { add_header Cache-Control "public"; } + location /fonts/ { + access_log off; + expires 1y; + add_header Cache-Control "public"; + } + location /locales/ { access_log off; add_header Cache-Control "public"; diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md index 55b61f9f3..018dc2050 100644 --- a/docs/docs/configuration/genai.md +++ b/docs/docs/configuration/genai.md @@ -70,7 +70,7 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru genai: provider: ollama base_url: http://localhost:11434 - model: llava:7b + model: qwen3-vl:4b ``` ## Google Gemini diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md index ac822a3a6..7e5618b5b 100644 --- a/docs/docs/configuration/genai/config.md +++ b/docs/docs/configuration/genai/config.md @@ -35,19 +35,18 @@ Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger s :::tip -If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. https://github.com/skye-harris/ollama-modelfiles contains optimized model configs for this task. +If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. `qwen3-vl` supports vision and tools simultaneously in Ollama.
::: The following models are recommended: -| Model | Notes | -| ----------------- | ----------------------------------------------------------- | -| `qwen3-vl` | Strong visual and situational understanding | -| `Intern3.5VL` | Relatively fast with good vision comprehension | -| `gemma3` | Strong frame-to-frame understanding, slower inference times | -| `qwen2.5-vl` | Fast but capable model with good vision comprehension | -| `llava-phi3` | Lightweight and fast model with vision comprehension | +| Model | Notes | +| ----------------- | -------------------------------------------------------------------- | +| `qwen3-vl` | Strong visual and situational understanding, higher vram requirement | +| `Intern3.5VL` | Relatively fast with good vision comprehension | +| `gemma3` | Strong frame-to-frame understanding, slower inference times | +| `qwen2.5-vl` | Fast but capable model with good vision comprehension | :::note diff --git a/web/src/views/settings/TriggerView.tsx b/web/src/views/settings/TriggerView.tsx index c81702d2b..0b004fd82 100644 --- a/web/src/views/settings/TriggerView.tsx +++ b/web/src/views/settings/TriggerView.tsx @@ -198,9 +198,9 @@ export default function TriggerView({ return axios .put("config/set", configBody) - .then((configResponse) => { + .then(async (configResponse) => { if (configResponse.status === 200) { - updateConfig(); + await updateConfig(); const displayName = friendly_name && friendly_name !== "" ? `${friendly_name} (${name})` @@ -353,9 +353,9 @@ export default function TriggerView({ return axios .put("config/set", configBody) - .then((configResponse) => { + .then(async (configResponse) => { if (configResponse.status === 200) { - updateConfig(); + await updateConfig(); const friendly = config?.cameras?.[selectedCamera]?.semantic_search ?.triggers?.[name]?.friendly_name;