diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md
index 0fc3ee814..ac0b9387a 100644
--- a/docs/docs/configuration/custom_classification/object_classification.md
+++ b/docs/docs/configuration/custom_classification/object_classification.md
@@ -39,7 +39,7 @@ For object classification:
:::note
-A tracked object can only have a single sub label. If you are using Triggers or Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly as it depends on which enrichment completes its analysis first. Consider using the `attribute` type instead.
+A tracked object can only have a single sub label. If you are using Triggers or Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly, as it depends on which enrichment completes its analysis first. This could also occur with `car` objects that are assigned a sub label for a delivery carrier. Consider using the `attribute` type instead, as sketched after this note.
:::
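+
+For illustration, a hypothetical sketch of a `car` classification model using the `attribute` type (the key names below are assumptions for illustration only; consult the object classification reference config for the actual schema):
+
+```yaml
+classification:
+  custom:
+    delivery_carrier:
+      object_config:
+        objects:
+          - car
+        # "attribute" avoids competing with other enrichments for the
+        # single sub label slot (key names assumed, not authoritative)
+        classification_type: attribute
+```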
diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md
index f9a3e1de0..991562871 100644
--- a/docs/docs/configuration/genai.md
+++ b/docs/docs/configuration/genai.md
@@ -48,15 +48,29 @@ Using Ollama on CPU is not recommended, high inference times make using Generati
:::
-[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance.
+[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
-Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
+Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests).
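+
+For example, a minimal sketch of setting these variables for the official Ollama Docker image (the queue and loaded-model values are illustrative; size them for your hardware and preferences):
+
+```yaml
+services:
+  ollama:
+    image: ollama/ollama:latest
+    ports:
+      - "11434:11434"
+    environment:
+      # Process one request at a time so responses stay predictable
+      - OLLAMA_NUM_PARALLEL=1
+      # Illustrative values; tune for your hardware
+      - OLLAMA_MAX_QUEUE=128
+      - OLLAMA_MAX_LOADED_MODELS=1
+    volumes:
+      - ./ollama:/root/.ollama
+```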
+
+### Model Types: Instruct vs Thinking
+
+Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or MiniGPT variants) offer both **instruct** and **thinking** versions.
+
+- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
+- **Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
+
+Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, Frigate always uses instruct-style prompts and specifically disables thinking-mode behaviors to ensure concise, useful responses.
+
+**Recommendation:** Always select the `-instruct` tag (or whichever variant your model provider documents as the instruct version) of any model you use in your Frigate configuration. If in doubt, refer to your model provider's documentation or model library for guidance on the correct variant.
+
### Supported Models
-You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
+You must use a vision-capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/search?c=vision). Note that Frigate will not automatically download the model you specify in your config; you must download the model to your local instance of Ollama first, e.g. by running `ollama pull qwen3-vl:2b-instruct` on your Ollama server/Docker container. The model specified in Frigate's config must match the downloaded model tag.
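+
+For example, after pulling the model, the tag referenced in Frigate's config must match it exactly (a minimal sketch; adjust `base_url` for where your Ollama server is reachable):
+
+```yaml
+genai:
+  enabled: true
+  provider: ollama
+  # Assumed host/port; point this at your Ollama server
+  base_url: http://localhost:11434
+  # Must match the tag pulled with `ollama pull`
+  model: qwen3-vl:2b-instruct
+```
+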
:::note
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index 229dd698c..f6ee76ee5 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -157,7 +157,7 @@ A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite`
#### YOLOv9
-[YOLOv9](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite) models that are compiled for Tensorflow Lite and properly quantized are supported, but not included by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. Note that the model may require a custom label file (eg. [use this 17 label file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) for the model linked above.)
+YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. [Download the model](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite), bind mount the file into the container, and provide the path with `model.path`. Note that the linked model requires a custom [labelmap file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) that includes only 17 of the COCO classes.
YOLOv9 Setup & Config
@@ -178,7 +178,7 @@ model:
labelmap_path: /config/labels-coco17.txt
```
-Note that the labelmap uses a subset of the complete COCO label set that has only 17 objects.
+Note that due to hardware limitations of the Coral, the labelmap is a subset of the COCO labels and includes only 17 object classes.
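+
+For example, a minimal sketch of bind mounting the model and labelmap into the container with Docker Compose (host paths and the container-side model path are illustrative; they must match `model.path` and `labelmap_path` in your config):
+
+```yaml
+services:
+  frigate:
+    volumes:
+      # Host paths are illustrative; use wherever you saved the files
+      - ./yolov9-s-relu6-best_320_int8_edgetpu.tflite:/config/yolov9.tflite
+      - ./labels-coco17.txt:/config/labels-coco17.txt
+```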
diff --git a/web/public/locales/en/views/events.json b/web/public/locales/en/views/events.json
index 5c0f137b3..ea3ee853d 100644
--- a/web/public/locales/en/views/events.json
+++ b/web/public/locales/en/views/events.json
@@ -9,7 +9,11 @@
"empty": {
"alert": "There are no alerts to review",
"detection": "There are no detections to review",
- "motion": "No motion data found"
+ "motion": "No motion data found",
+ "recordingsDisabled": {
+ "title": "Recordings must be enabled",
+ "description": "Review items can only be created for a camera when recordings are enabled for that camera."
+ }
},
"timeline": "Timeline",
"timeline.aria": "Select timeline",
diff --git a/web/public/locales/en/views/explore.json b/web/public/locales/en/views/explore.json
index ff95e2fc6..53b04e6c4 100644
--- a/web/public/locales/en/views/explore.json
+++ b/web/public/locales/en/views/explore.json
@@ -166,6 +166,9 @@
"tips": {
"descriptionSaved": "Successfully saved description",
"saveDescriptionFailed": "Failed to update the description: {{errorMessage}}"
+ },
+ "title": {
+ "label": "Title"
}
},
"itemMenu": {
diff --git a/web/src/components/card/EmptyCard.tsx b/web/src/components/card/EmptyCard.tsx
index de934482f..8d6b67a68 100644
--- a/web/src/components/card/EmptyCard.tsx
+++ b/web/src/components/card/EmptyCard.tsx
@@ -2,15 +2,18 @@ import React from "react";
import { Button } from "../ui/button";
import Heading from "../ui/heading";
import { Link } from "react-router-dom";
+import { cn } from "@/lib/utils";
type EmptyCardProps = {
+ className?: string;
icon: React.ReactNode;
title: string;
- description: string;
+ description?: string;
buttonText?: string;
link?: string;
};
export function EmptyCard({
+ className,
icon,
title,
description,
@@ -18,10 +21,12 @@ export function EmptyCard({
link,
}: EmptyCardProps) {
return (
-