Compare commits

..

7 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Josh Hawkins | bf5e0c76fe | fix trigger logic | 2025-12-01 15:00:20 -06:00 |
| Nicolas Mowen | e957f8d9f9 | Fix incorrect averaging of the segments so it correctly only uses the most recent segments | 2025-12-01 13:43:44 -07:00 |
| Nicolas Mowen | cac29f96e7 | Add bird to classification reference | 2025-12-01 12:51:58 -07:00 |
| Nicolas Mowen | 755f51f1ad | Make number of classification images to be kept configurable | 2025-12-01 12:49:13 -07:00 |
| Nicolas Mowen | d5f5e93f4f | Update classification docs for training recommendations | 2025-12-01 10:51:03 -07:00 |
| Josh Hawkins | 475ab146b4 | update transcription docs | 2025-12-01 11:13:59 -06:00 |
| Josh Hawkins | 0d614b5a3e | tweak tracking details layout for small desktop sizes | 2025-12-01 11:02:58 -06:00 |
12 changed files with 118 additions and 37 deletions

View File

@ -191,6 +191,7 @@ ONVIF
openai
opencv
openvino
overfitting
OWASP
paddleocr
paho

View File

@ -168,6 +168,8 @@ Recorded `speech` events will always use a `whisper` model, regardless of the `m
If you hear speech that's actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
Options are being considered for future versions of Frigate to support external `whisper` Docker containers. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice), and run on more powerful machines when available.
2. Why don't you save live transcription text and use that for `speech` events?
There's no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.

View File

@ -69,4 +69,6 @@ Once all images are assigned, training will begin automatically.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
- **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100%, as this can lead to overfitting.

View File

@ -710,6 +710,44 @@ audio_transcription:
# List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
language: en
# Optional: Configuration for classification models
classification:
# Optional: Configuration for bird classification
bird:
# Optional: Enable bird classification (default: shown below)
enabled: False
# Optional: Minimum classification score required to be considered a match (default: shown below)
threshold: 0.9
custom:
# Required: name of the classification model
model_name:
# Optional: Enable running the model (default: shown below)
enabled: True
# Optional: Name of classification model (default: shown below)
name: None
# Optional: Classification score threshold to change the state (default: shown below)
threshold: 0.8
# Optional: Number of classification attempts to save in the recent classifications tab (default: shown below)
# NOTE: Defaults to 200 for object classification and 100 for state classification if not specified
save_attempts: None
# Optional: Object classification configuration
object_config:
# Required: Object types to classify
objects: [dog]
# Optional: Type of classification that is applied (default: shown below)
classification_type: sub_label
# Optional: State classification configuration
state_config:
# Required: Cameras to run classification on
cameras:
camera_name:
# Required: Crop of image frame on this camera to run classification on
crop: [0, 180, 220, 400]
# Optional: If classification should be run when motion is detected in the crop (default: shown below)
motion: False
# Optional: Interval to run classification on in seconds (default: shown below)
interval: None
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,

View File

@ -1731,7 +1731,19 @@ def create_trigger_embedding(
if event.data.get("type") != "object":
return
if thumbnail := get_event_thumbnail_bytes(event):
# Get the thumbnail
thumbnail = get_event_thumbnail_bytes(event)
if thumbnail is None:
return JSONResponse(
content={
"success": False,
"message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
},
status_code=400,
)
# Try to reuse existing embedding from database
cursor = context.db.execute_sql(
"""
SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
@ -1745,23 +1757,14 @@ def create_trigger_embedding(
query_embedding = row[0]
embedding = np.frombuffer(query_embedding, dtype=np.float32)
else:
# Extract valid thumbnail
thumbnail = get_event_thumbnail_bytes(event)
if thumbnail is None:
return JSONResponse(
content={
"success": False,
"message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
},
status_code=400,
)
# Generate new embedding
embedding = context.generate_image_embedding(
body.data, (base64.b64encode(thumbnail).decode("ASCII"))
)
if not embedding:
if embedding is None or (
isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
):
return JSONResponse(
content={
"success": False,
@ -1896,7 +1899,9 @@ def update_trigger_embedding(
body.data, (base64.b64encode(thumbnail).decode("ASCII"))
)
if not embedding:
if embedding is None or (
isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
):
return JSONResponse(
content={
"success": False,

View File

@ -105,6 +105,11 @@ class CustomClassificationConfig(FrigateBaseModel):
threshold: float = Field(
default=0.8, title="Classification score threshold to change the state."
)
save_attempts: int | None = Field(
default=None,
title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification.",
ge=0,
)
object_config: CustomClassificationObjectConfig | None = Field(default=None)
state_config: CustomClassificationStateConfig | None = Field(default=None)

View File

@ -250,6 +250,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
if self.interpreter is None:
# When interpreter is None, always save (score is 0.0, which is < 1.0)
if self._should_save_image(camera, "unknown", 0.0):
save_attempts = (
self.model_config.save_attempts
if self.model_config.save_attempts is not None
else 100
)
write_classification_attempt(
self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@ -257,6 +262,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
now,
"unknown",
0.0,
max_files=save_attempts,
)
return
@ -277,6 +283,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
detected_state = self.labelmap[best_id]
if self._should_save_image(camera, detected_state, score):
save_attempts = (
self.model_config.save_attempts
if self.model_config.save_attempts is not None
else 100
)
write_classification_attempt(
self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@ -284,6 +295,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
now,
detected_state,
score,
max_files=save_attempts,
)
if score < self.model_config.threshold:
@ -482,6 +494,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
return
if self.interpreter is None:
save_attempts = (
self.model_config.save_attempts
if self.model_config.save_attempts is not None
else 200
)
write_classification_attempt(
self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@ -489,6 +506,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
now,
"unknown",
0.0,
max_files=save_attempts,
)
return
@ -506,6 +524,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
score = round(probs[best_id], 2)
self.__update_metrics(datetime.datetime.now().timestamp() - now)
save_attempts = (
self.model_config.save_attempts
if self.model_config.save_attempts is not None
else 200
)
write_classification_attempt(
self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@ -513,7 +536,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
now,
self.labelmap[best_id],
score,
max_files=200,
max_files=save_attempts,
)
if score < self.model_config.threshold:
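The fallback of 200 saved attempts for object classification and 100 for state classification is resolved inline at each of the four call sites above. A minimal sketch of that resolution as a single function follows; the helper name and its `is_object_model` parameter are hypothetical, assuming nothing beyond what the diff shows.

```python
def resolve_save_attempts(save_attempts: int | None, is_object_model: bool) -> int:
    """Hypothetical helper; the diff performs this resolution inline instead.

    A configured `save_attempts` always wins; otherwise object classification
    keeps 200 recent attempts and state classification keeps 100, matching the
    defaults documented in the reference config above.
    """
    if save_attempts is not None:
        return save_attempts
    return 200 if is_object_model else 100


# Mirrors the four call sites above.
assert resolve_save_attempts(None, is_object_model=True) == 200
assert resolve_save_attempts(None, is_object_model=False) == 100
assert resolve_save_attempts(50, is_object_model=True) == 50
```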

View File

@ -5,7 +5,7 @@ import shutil
import threading
from pathlib import Path
from peewee import fn
from peewee import SQL, fn
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR
@ -44,13 +44,19 @@ class StorageMaintainer(threading.Thread):
)
}
# calculate MB/hr
# calculate MB/hr from last 100 segments
try:
bandwidth = round(
Recordings.select(fn.AVG(bandwidth_equation))
# Subquery to get last 100 segments, then average their bandwidth
last_100 = (
Recordings.select(bandwidth_equation.alias("bw"))
.where(Recordings.camera == camera, Recordings.segment_size > 0)
.order_by(Recordings.start_time.desc())
.limit(100)
.scalar()
.alias("recent")
)
bandwidth = round(
Recordings.select(fn.AVG(SQL("bw"))).from_(last_100).scalar()
* 3600,
2,
)
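Judging from the removed lines, the previous query appears to have applied `.limit(100)` to the `AVG()` select itself, which does not restrict the rows being averaged: the aggregate collapses the result to a single row before `LIMIT` applies, so every stored segment contributed. Below is a self-contained sketch of the corrected pattern against a toy peewee model and an in-memory SQLite database; the `Segment` model and its field names are illustrative, not Frigate's.

```python
from peewee import FloatField, Model, SQL, SqliteDatabase, fn

db = SqliteDatabase(":memory:")


class Segment(Model):
    """Toy stand-in for the Recordings model; names are illustrative only."""

    start_time = FloatField()
    segment_size = FloatField()  # MB
    duration = FloatField()  # seconds

    class Meta:
        database = db


db.connect()
db.create_tables([Segment])

# 300 ten-second segments; the newest 100 are much larger, so the two averages differ.
for i in range(300):
    Segment.create(
        start_time=float(i),
        segment_size=1.0 if i < 200 else 5.0,
        duration=10.0,
    )

bw = (Segment.segment_size / Segment.duration).alias("bw")  # MB per second

# Broken shape: LIMIT applies to the single AVG() row, so all 300 segments are averaged.
avg_all = (
    Segment.select(fn.AVG(Segment.segment_size / Segment.duration))
    .order_by(Segment.start_time.desc())
    .limit(100)
    .scalar()
)

# Fixed shape: restrict to the newest 100 rows in a subquery first, then average those.
recent = (
    Segment.select(bw)
    .order_by(Segment.start_time.desc())
    .limit(100)
    .alias("recent")
)
avg_recent = Segment.select(fn.AVG(SQL("bw"))).from_(recent).scalar()

# MB/hr, as in the maintainer above: ~840.0 vs ~1800.0
print(round(avg_all * 3600, 2), round(avg_recent * 3600, 2))
```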

View File

@ -330,7 +330,7 @@ def collect_state_classification_examples(
1. Queries review items from specified cameras
2. Selects 100 balanced timestamps across the data
3. Extracts keyframes from recordings (cropped to specified regions)
4. Selects 20 most visually distinct images
4. Selects 24 most visually distinct images
5. Saves them to the dataset directory
Args:
@ -660,7 +660,6 @@ def collect_object_classification_examples(
Args:
model_name: Name of the classification model
label: Object label to collect (e.g., "person", "car")
cameras: List of camera names to collect examples from
"""
dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
temp_dir = os.path.join(dataset_dir, "temp")

View File

@ -548,7 +548,7 @@ export default function SearchDetailDialog({
"relative flex items-center justify-between",
"w-full",
// match dialog's max-width classes
"sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
"max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
)}
>
<Tooltip>
@ -594,8 +594,7 @@ export default function SearchDetailDialog({
ref={isDesktop ? dialogContentRef : undefined}
className={cn(
"scrollbar-container overflow-y-auto",
isDesktop &&
"max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
isDesktop && "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
isMobile && "flex h-full flex-col px-4",
)}
onEscapeKeyDown={(event) => {

View File

@ -622,7 +622,7 @@ export function TrackingDetails({
<div
className={cn(
isDesktop && "justify-between overflow-hidden md:basis-2/5",
isDesktop && "justify-between overflow-hidden lg:basis-2/5",
)}
>
{isDesktop && tabs && (

View File

@ -305,6 +305,7 @@ export type CustomClassificationModelConfig = {
enabled: boolean;
name: string;
threshold: number;
save_attempts?: number;
object_config?: {
objects: string[];
classification_type: string;