From 949c426df1b25ba8477982a2dc8f963f70993ecc Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sun, 30 Nov 2025 12:32:32 -0600
Subject: [PATCH 1/9] ensure audio events display timeline entries in tracking
details
---
.../overlay/detail/SearchDetailDialog.tsx | 2 +-
.../overlay/detail/TrackingDetails.tsx | 179 +++++++++---------
2 files changed, 92 insertions(+), 89 deletions(-)
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index 4ead27218..dd1384951 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -498,7 +498,7 @@ export default function SearchDetailDialog({
const views = [...SEARCH_TABS];
- if (search.data.type != "object" || !search.has_clip) {
+ if (!search.has_clip) {
const index = views.indexOf("tracking_details");
views.splice(index, 1);
}
diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx
index 26cba7d3a..ae4cf31f3 100644
--- a/web/src/components/overlay/detail/TrackingDetails.tsx
+++ b/web/src/components/overlay/detail/TrackingDetails.tsx
@@ -900,96 +900,99 @@ function LifecycleIconRow({
{getLifecycleItemDescription(item)}
-
-
-
- {t("trackingDetails.lifecycleItemDesc.header.score")}
-
- {score}
-
-
-
- {t("trackingDetails.lifecycleItemDesc.header.ratio")}
-
- {ratio}
-
-
-
- {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
- {attributeAreaPx !== undefined &&
- attributeAreaPct !== undefined && (
-
- ({getTranslatedLabel(item.data.label)})
-
- )}
-
- {areaPx !== undefined && areaPct !== undefined ? (
-
- {t("information.pixels", { ns: "common", area: areaPx })} ·{" "}
- {areaPct}%
+ {/* Only show Score/Ratio/Area for object events, not for audio (heard) or manual API (external) events */}
+ {item.class_type !== "heard" && item.class_type !== "external" && (
+
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.score")}
- ) : (
- N/A
- )}
-
- {attributeAreaPx !== undefined &&
- attributeAreaPct !== undefined && (
-
-
- {t("trackingDetails.lifecycleItemDesc.header.area")} (
- {getTranslatedLabel(item.data.attribute)})
-
-
- {t("information.pixels", {
- ns: "common",
- area: attributeAreaPx,
- })}{" "}
- · {attributeAreaPct}%
-
-
- )}
-
- {item.data?.zones && item.data.zones.length > 0 && (
-
- {item.data.zones.map((zone, zidx) => {
- const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
- return (
- {
- e.stopPropagation();
- setSelectedZone(zone);
- }}
- style={{
- borderColor: `rgba(${color}, 0.6)`,
- background: `rgba(${color}, 0.08)`,
- }}
- >
-
-
- {item.data?.zones_friendly_names?.[zidx]}
-
-
- );
- })}
+ {score}
- )}
-
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.ratio")}
+
+ {ratio}
+
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.area")}{" "}
+ {attributeAreaPx !== undefined &&
+ attributeAreaPct !== undefined && (
+
+ ({getTranslatedLabel(item.data.label)})
+
+ )}
+
+ {areaPx !== undefined && areaPct !== undefined ? (
+
+ {t("information.pixels", { ns: "common", area: areaPx })}{" "}
+ · {areaPct}%
+
+ ) : (
+ N/A
+ )}
+
+ {attributeAreaPx !== undefined &&
+ attributeAreaPct !== undefined && (
+
+
+ {t("trackingDetails.lifecycleItemDesc.header.area")} (
+ {getTranslatedLabel(item.data.attribute)})
+
+
+ {t("information.pixels", {
+ ns: "common",
+ area: attributeAreaPx,
+ })}{" "}
+ · {attributeAreaPct}%
+
+
+ )}
+
+ )}
+
+ {item.data?.zones && item.data.zones.length > 0 && (
+
+ {item.data.zones.map((zone, zidx) => {
+ const color = getZoneColor(zone)?.join(",") ?? "0,0,0";
+ return (
+ {
+ e.stopPropagation();
+ setSelectedZone(zone);
+ }}
+ style={{
+ borderColor: `rgba(${color}, 0.6)`,
+ background: `rgba(${color}, 0.08)`,
+ }}
+ >
+
+
+ {item.data?.zones_friendly_names?.[zidx]}
+
+
+ );
+ })}
+
+ )}
From 0d614b5a3e99579adc378222a4d34554c112a936 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Mon, 1 Dec 2025 11:02:58 -0600
Subject: [PATCH 2/9] tweak tracking details layout for small desktop sizes
---
web/src/components/overlay/detail/SearchDetailDialog.tsx | 5 ++---
web/src/components/overlay/detail/TrackingDetails.tsx | 2 +-
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index dd1384951..1c46213df 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -548,7 +548,7 @@ export default function SearchDetailDialog({
"relative flex items-center justify-between",
"w-full",
// match dialog's max-width classes
- "sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
+ "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
)}
>
@@ -594,8 +594,7 @@ export default function SearchDetailDialog({
ref={isDesktop ? dialogContentRef : undefined}
className={cn(
"scrollbar-container overflow-y-auto",
- isDesktop &&
- "max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-[70%]",
+ isDesktop && "max-h-[95dvh] max-w-[85%] xl:max-w-[70%]",
isMobile && "flex h-full flex-col px-4",
)}
onEscapeKeyDown={(event) => {
diff --git a/web/src/components/overlay/detail/TrackingDetails.tsx b/web/src/components/overlay/detail/TrackingDetails.tsx
index ae4cf31f3..fa2f0cb2b 100644
--- a/web/src/components/overlay/detail/TrackingDetails.tsx
+++ b/web/src/components/overlay/detail/TrackingDetails.tsx
@@ -622,7 +622,7 @@ export function TrackingDetails({
{isDesktop && tabs && (
From 475ab146b4235f0841d21119ddf3491469bff594 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Mon, 1 Dec 2025 11:13:59 -0600
Subject: [PATCH 3/9] update transcription docs
---
docs/docs/configuration/audio_detectors.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/docs/docs/configuration/audio_detectors.md b/docs/docs/configuration/audio_detectors.md
index 80b0727a5..f2ff99b6b 100644
--- a/docs/docs/configuration/audio_detectors.md
+++ b/docs/docs/configuration/audio_detectors.md
@@ -168,6 +168,8 @@ Recorded `speech` events will always use a `whisper` model, regardless of the `m
If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
+ Support for external `whisper` Docker containers is being considered for a future version of Frigate. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice) and run on more powerful machines when available.
+
2. Why don't you save live transcription text and use that for `speech` events?
There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
From d5f5e93f4fc8bf996d820accd8bb4136c97af654 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Mon, 1 Dec 2025 10:51:03 -0700
Subject: [PATCH 4/9] Update classification docs for training recommendations
---
.cspell/frigate-dictionary.txt | 1 +
.../custom_classification/state_classification.md | 4 +++-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt
index 6e66a4704..329c41815 100644
--- a/.cspell/frigate-dictionary.txt
+++ b/.cspell/frigate-dictionary.txt
@@ -191,6 +191,7 @@ ONVIF
openai
opencv
openvino
+overfitting
OWASP
paddleocr
paho
diff --git a/docs/docs/configuration/custom_classification/state_classification.md b/docs/docs/configuration/custom_classification/state_classification.md
index 66d3e60ca..927fe91af 100644
--- a/docs/docs/configuration/custom_classification/state_classification.md
+++ b/docs/docs/configuration/custom_classification/state_classification.md
@@ -69,4 +69,6 @@ Once all images are assigned, training will begin automatically.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
-- **Data collection**: Use the model’s Recent Classifications tab to gather balanced examples across times of day and weather.
+- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
+- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train on additional images when the model is already performing consistently.
+- **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios that differ from the default state. Training on these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training on large quantities of images that look very similar, especially if they already score 100%, as this can lead to overfitting.
From 755f51f1adda04474f50468ec9ef84b2d641299d Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Mon, 1 Dec 2025 12:49:13 -0700
Subject: [PATCH 5/9] Make the number of saved classification images
 configurable
---
docs/docs/configuration/reference.md | 32 +++++++++++++++++++
frigate/config/classification.py | 5 +++
.../real_time/custom_classification.py | 25 ++++++++++++++-
frigate/util/classification.py | 3 +-
web/src/types/frigateConfig.ts | 1 +
5 files changed, 63 insertions(+), 3 deletions(-)
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md
index f8b49303f..21c921a26 100644
--- a/docs/docs/configuration/reference.md
+++ b/docs/docs/configuration/reference.md
@@ -710,6 +710,38 @@ audio_transcription:
# List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
language: en
+# Optional: Configuration for custom classification models
+classification:
+ custom:
+ # Required: name of the classification model
+ model_name:
+ # Optional: Enable running the model (default: shown below)
+ enabled: True
+ # Optional: Name of classification model (default: shown below)
+ name: None
+ # Optional: Classification score threshold to change the state (default: shown below)
+ threshold: 0.8
+ # Optional: Number of classification attempts to save in the recent classifications tab (default: shown below)
+ # NOTE: Defaults to 200 for object classification and 100 for state classification if not specified
+ save_attempts: None
+ # Optional: Object classification configuration
+ object_config:
+ # Required: Object types to classify
+ objects: [dog]
+ # Optional: Type of classification that is applied (default: shown below)
+ classification_type: sub_label
+ # Optional: State classification configuration
+ state_config:
+ # Required: Cameras to run classification on
+ cameras:
+ camera_name:
+ # Required: Crop of image frame on this camera to run classification on
+ crop: [0, 180, 220, 400]
+ # Optional: If classification should be run when motion is detected in the crop (default: shown below)
+ motion: False
+ # Optional: Interval to run classification on in seconds (default: shown below)
+ interval: None
+
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,
diff --git a/frigate/config/classification.py b/frigate/config/classification.py
index bdcbf48f1..fb8e3de29 100644
--- a/frigate/config/classification.py
+++ b/frigate/config/classification.py
@@ -105,6 +105,11 @@ class CustomClassificationConfig(FrigateBaseModel):
threshold: float = Field(
default=0.8, title="Classification score threshold to change the state."
)
+ save_attempts: int | None = Field(
+ default=None,
+ title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification.",
+ ge=0,
+ )
object_config: CustomClassificationObjectConfig | None = Field(default=None)
state_config: CustomClassificationStateConfig | None = Field(default=None)
diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py
index 179d2f43f..c8f31db76 100644
--- a/frigate/data_processing/real_time/custom_classification.py
+++ b/frigate/data_processing/real_time/custom_classification.py
@@ -250,6 +250,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
if self.interpreter is None:
# When interpreter is None, always save (score is 0.0, which is < 1.0)
if self._should_save_image(camera, "unknown", 0.0):
+ save_attempts = (
+ self.model_config.save_attempts
+ if self.model_config.save_attempts is not None
+ else 100
+ )
write_classification_attempt(
self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@@ -257,6 +262,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
now,
"unknown",
0.0,
+ max_files=save_attempts,
)
return
@@ -277,6 +283,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
detected_state = self.labelmap[best_id]
if self._should_save_image(camera, detected_state, score):
+ save_attempts = (
+ self.model_config.save_attempts
+ if self.model_config.save_attempts is not None
+ else 100
+ )
write_classification_attempt(
self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
@@ -284,6 +295,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
now,
detected_state,
score,
+ max_files=save_attempts,
)
if score < self.model_config.threshold:
@@ -482,6 +494,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
return
if self.interpreter is None:
+ save_attempts = (
+ self.model_config.save_attempts
+ if self.model_config.save_attempts is not None
+ else 200
+ )
write_classification_attempt(
self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@@ -489,6 +506,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
now,
"unknown",
0.0,
+ max_files=save_attempts,
)
return
@@ -506,6 +524,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
score = round(probs[best_id], 2)
self.__update_metrics(datetime.datetime.now().timestamp() - now)
+ save_attempts = (
+ self.model_config.save_attempts
+ if self.model_config.save_attempts is not None
+ else 200
+ )
write_classification_attempt(
self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
@@ -513,7 +536,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
now,
self.labelmap[best_id],
score,
- max_files=200,
+ max_files=save_attempts,
)
if score < self.model_config.threshold:
diff --git a/frigate/util/classification.py b/frigate/util/classification.py
index a74094c32..1f4213315 100644
--- a/frigate/util/classification.py
+++ b/frigate/util/classification.py
@@ -330,7 +330,7 @@ def collect_state_classification_examples(
1. Queries review items from specified cameras
2. Selects 100 balanced timestamps across the data
3. Extracts keyframes from recordings (cropped to specified regions)
- 4. Selects 20 most visually distinct images
+ 4. Selects 24 most visually distinct images
5. Saves them to the dataset directory
Args:
@@ -660,7 +660,6 @@ def collect_object_classification_examples(
Args:
model_name: Name of the classification model
label: Object label to collect (e.g., "person", "car")
- cameras: List of camera names to collect examples from
"""
dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
temp_dir = os.path.join(dataset_dir, "temp")
diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts
index c7cbb50b8..985fe3457 100644
--- a/web/src/types/frigateConfig.ts
+++ b/web/src/types/frigateConfig.ts
@@ -305,6 +305,7 @@ export type CustomClassificationModelConfig = {
enabled: boolean;
name: string;
threshold: number;
+ save_attempts?: number;
object_config?: {
objects: string[];
classification_type: string;
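Note that the same `save_attempts` fallback is duplicated four times across the two processors above. A follow-up could hoist it into a shared helper; a minimal sketch, assuming a hypothetical `resolve_save_attempts` function (the name and standalone form are illustrative, not part of the patch):

```python
# Sketch of consolidating the repeated save_attempts fallback.
# resolve_save_attempts() is hypothetical; the per-type defaults
# (200 for object, 100 for state) come from the patch itself.

DEFAULT_SAVE_ATTEMPTS = {"object": 200, "state": 100}


def resolve_save_attempts(configured: int | None, model_type: str) -> int:
    """Return the configured value, or the per-type default."""
    if configured is not None:
        return configured
    return DEFAULT_SAVE_ATTEMPTS[model_type]


# Each call site would then collapse to, e.g.:
#   max_files = resolve_save_attempts(self.model_config.save_attempts, "state")
#   write_classification_attempt(..., max_files=max_files)
```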
From cac29f96e7e3edb0478c475fe6c564b66682a722 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Mon, 1 Dec 2025 12:51:58 -0700
Subject: [PATCH 6/9] Add bird to classification reference
---
docs/docs/configuration/reference.md | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md
index 21c921a26..a375086cb 100644
--- a/docs/docs/configuration/reference.md
+++ b/docs/docs/configuration/reference.md
@@ -710,8 +710,14 @@ audio_transcription:
# List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
language: en
-# Optional: Configuration for custom classification models
+# Optional: Configuration for classification models
classification:
+ # Optional: Configuration for bird classification
+ bird:
+ # Optional: Enable bird classification (default: shown below)
+ enabled: False
+ # Optional: Minimum classification score required to be considered a match (default: shown below)
+ threshold: 0.9
custom:
# Required: name of the classification model
model_name:
From e957f8d9f9db977ac67430e9296d22c9c427b508 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Mon, 1 Dec 2025 13:43:44 -0700
Subject: [PATCH 7/9] Fix segment bandwidth averaging to use only the most
 recent segments
---
frigate/storage.py | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/frigate/storage.py b/frigate/storage.py
index ee11cf7a9..feabe06ff 100644
--- a/frigate/storage.py
+++ b/frigate/storage.py
@@ -5,7 +5,7 @@ import shutil
import threading
from pathlib import Path
-from peewee import fn
+from peewee import SQL, fn
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR
@@ -44,13 +44,19 @@ class StorageMaintainer(threading.Thread):
)
}
- # calculate MB/hr
+ # calculate MB/hr from the last 100 segments
try:
- bandwidth = round(
- Recordings.select(fn.AVG(bandwidth_equation))
+ # Subquery to get last 100 segments, then average their bandwidth
+ last_100 = (
+ Recordings.select(bandwidth_equation.alias("bw"))
.where(Recordings.camera == camera, Recordings.segment_size > 0)
+ .order_by(Recordings.start_time.desc())
.limit(100)
- .scalar()
+ .alias("recent")
+ )
+
+ bandwidth = round(
+ Recordings.select(fn.AVG(SQL("bw"))).from_(last_100).scalar()
* 3600,
2,
)
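The bug being fixed is an SQL subtlety worth spelling out: `LIMIT` applies after aggregation, so the old `SELECT AVG(bw) ... LIMIT 100` averaged every segment for the camera and only then limited the already-single-row result. Moving the limit into a subquery restricts the rows before `AVG()` runs. A self-contained sketch of the pattern in peewee (the model fields and bandwidth expression below are assumptions for illustration; Frigate's real ones live in its models module):

```python
from peewee import SQL, FloatField, Model, SqliteDatabase, TextField, fn

db = SqliteDatabase(":memory:")


class Recordings(Model):
    camera = TextField()
    start_time = FloatField()
    segment_size = FloatField()  # MB (assumed unit)
    duration = FloatField()  # seconds (assumed field)

    class Meta:
        database = db


db.create_tables([Recordings])

# Assumed stand-in for Frigate's bandwidth_equation (MB per second).
bandwidth_equation = Recordings.segment_size / Recordings.duration

# The 100 most recent segments are selected *inside* the subquery, so
# AVG() only ever sees those rows. Roughly equivalent SQL:
#   SELECT AVG(bw) FROM (
#     SELECT segment_size / duration AS bw FROM recordings
#     WHERE camera = ? AND segment_size > 0
#     ORDER BY start_time DESC LIMIT 100
#   ) AS recent;
last_100 = (
    Recordings.select(bandwidth_equation.alias("bw"))
    .where(Recordings.camera == "front", Recordings.segment_size > 0)
    .order_by(Recordings.start_time.desc())
    .limit(100)
    .alias("recent")
)
avg_bw = Recordings.select(fn.AVG(SQL("bw"))).from_(last_100).scalar()
print(avg_bw)  # None until the table has matching rows
```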
From bf5e0c76fe9190fc5f3fdd946199b51ed5567987 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Mon, 1 Dec 2025 15:00:20 -0600
Subject: [PATCH 8/9] fix trigger logic
---
frigate/api/event.py | 53 ++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/frigate/api/event.py b/frigate/api/event.py
index 8e966d98b..b8b596cde 100644
--- a/frigate/api/event.py
+++ b/frigate/api/event.py
@@ -1731,37 +1731,40 @@ def create_trigger_embedding(
if event.data.get("type") != "object":
return
- if thumbnail := get_event_thumbnail_bytes(event):
- cursor = context.db.execute_sql(
- """
- SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
- """,
- [body.data],
+ # Get the thumbnail
+ thumbnail = get_event_thumbnail_bytes(event)
+
+ if thumbnail is None:
+ return JSONResponse(
+ content={
+ "success": False,
+ "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
+ },
+ status_code=400,
)
- row = cursor.fetchone() if cursor else None
+ # Try to reuse existing embedding from database
+ cursor = context.db.execute_sql(
+ """
+ SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
+ """,
+ [body.data],
+ )
- if row:
- query_embedding = row[0]
- embedding = np.frombuffer(query_embedding, dtype=np.float32)
+ row = cursor.fetchone() if cursor else None
+
+ if row:
+ query_embedding = row[0]
+ embedding = np.frombuffer(query_embedding, dtype=np.float32)
else:
- # Extract valid thumbnail
- thumbnail = get_event_thumbnail_bytes(event)
-
- if thumbnail is None:
- return JSONResponse(
- content={
- "success": False,
- "message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
- },
- status_code=400,
- )
-
+ # Generate new embedding
embedding = context.generate_image_embedding(
body.data, (base64.b64encode(thumbnail).decode("ASCII"))
)
- if not embedding:
+ if embedding is None or (
+ isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
+ ):
return JSONResponse(
content={
"success": False,
@@ -1896,7 +1899,9 @@ def update_trigger_embedding(
body.data, (base64.b64encode(thumbnail).decode("ASCII"))
)
- if not embedding:
+ if embedding is None or (
+ isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
+ ):
return JSONResponse(
content={
"success": False,
From 4ff61a77e7937f4b1c6145c6b78247accc63dfd9 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Mon, 1 Dec 2025 18:32:18 -0600
Subject: [PATCH 9/9] add ability to download clean snapshot
---
web/public/locales/en/views/explore.json | 4 ++++
web/src/components/menu/SearchResultActions.tsx | 12 ++++++++++++
.../overlay/detail/DetailActionsMenu.tsx | 14 ++++++++++++++
3 files changed, 30 insertions(+)
diff --git a/web/public/locales/en/views/explore.json b/web/public/locales/en/views/explore.json
index 5335aa5ac..3f057c38d 100644
--- a/web/public/locales/en/views/explore.json
+++ b/web/public/locales/en/views/explore.json
@@ -170,6 +170,10 @@
"label": "Download snapshot",
"aria": "Download snapshot"
},
+ "downloadCleanSnapshot": {
+ "label": "Download clean snapshot",
+ "aria": "Download clean snapshot"
+ },
"viewTrackingDetails": {
"label": "View tracking details",
"aria": "Show the tracking details"
diff --git a/web/src/components/menu/SearchResultActions.tsx b/web/src/components/menu/SearchResultActions.tsx
index 66de0c496..623005220 100644
--- a/web/src/components/menu/SearchResultActions.tsx
+++ b/web/src/components/menu/SearchResultActions.tsx
@@ -108,6 +108,18 @@ export default function SearchResultActions({
)}
+ {searchResult.has_snapshot &&
+ config?.cameras[searchResult.camera].snapshots.clean_copy && (
+
+ )}
{searchResult.data.type == "object" && (