Miscellaneous Fixes (#20866)

* Don't warn when event IDs have expired for trigger sync

* Import faster_whisper conditionally to avoid illegal instruction errors on systems without AVX support

* Catch OpenVINO runtime error

* Fix race condition in detail stream context

Navigating between tracked objects in Explore would sometimes prevent the object track from appearing.

* Handle case where classification images are deleted

* Adjust default rounded corners on larger screens

* Improve flow handling for classification state

* Remove images when wizard is cancelled

* Improve deletion handling for classes

* Set constraints on review buffers

* Update frontend to use the correct dataset response format

* Set minimum duration for recording based review items

* Use friendly names in review GenAI prompt

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
Josh Hawkins 2025-11-10 11:03:56 -06:00 committed by GitHub
parent 99a363c047
commit c371fc0c87
20 changed files with 287 additions and 113 deletions


@@ -595,9 +595,13 @@ def get_classification_dataset(name: str):
             "last_training_image_count": 0,
             "current_image_count": current_image_count,
             "new_images_count": current_image_count,
+            "dataset_changed": current_image_count > 0,
         }
     else:
         last_training_count = metadata.get("last_training_image_count", 0)
+        # Dataset has changed if count is different (either added or deleted images)
+        dataset_changed = current_image_count != last_training_count
+        # Only show positive count for new images (ignore deletions in the count display)
         new_images_count = max(0, current_image_count - last_training_count)
         training_metadata = {
             "has_trained": True,
@@ -605,6 +609,7 @@ def get_classification_dataset(name: str):
             "last_training_image_count": last_training_count,
             "current_image_count": current_image_count,
             "new_images_count": new_images_count,
+            "dataset_changed": dataset_changed,
         }

     return JSONResponse(
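The new dataset_changed flag is deliberately broader than new_images_count: deleting images marks the dataset as changed even though the displayed count of new images stays at zero. A minimal Python sketch of the rule in the hunks above, not part of the diff:

    def training_counts(current_image_count: int, last_training_count: int) -> dict:
        # Changed if images were added OR deleted since the last training run
        dataset_changed = current_image_count != last_training_count
        # The displayed count ignores deletions, so it never goes negative
        new_images_count = max(0, current_image_count - last_training_count)
        return {"dataset_changed": dataset_changed, "new_images_count": new_images_count}

    # Three images deleted since last training: changed, but nothing "new" to show
    assert training_counts(17, 20) == {"dataset_changed": True, "new_images_count": 0}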
@@ -948,31 +953,29 @@ async def generate_object_examples(request: Request, body: GenerateObjectExample
     dependencies=[Depends(require_role(["admin"]))],
     summary="Delete a classification model",
     description="""Deletes a specific classification model and all its associated data.
-    The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
+    Works even if the model is not in the config (e.g., partially created during wizard).
+    Returns a success message.""",
 )
 def delete_classification_model(request: Request, name: str):
-    config: FrigateConfig = request.app.frigate_config
-
-    if name not in config.classification.custom:
-        return JSONResponse(
-            content=(
-                {
-                    "success": False,
-                    "message": f"{name} is not a known classification model.",
-                }
-            ),
-            status_code=404,
-        )
+    sanitized_name = sanitize_filename(name)

     # Delete the classification model's data directory in clips
-    data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    data_dir = os.path.join(CLIPS_DIR, sanitized_name)
     if os.path.exists(data_dir):
-        shutil.rmtree(data_dir)
+        try:
+            shutil.rmtree(data_dir)
+            logger.info(f"Deleted classification data directory for {name}")
+        except Exception as e:
+            logger.debug(f"Failed to delete data directory for {name}: {e}")

     # Delete the classification model's files in model_cache
-    model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
+    model_dir = os.path.join(MODEL_CACHE_DIR, sanitized_name)
     if os.path.exists(model_dir):
-        shutil.rmtree(model_dir)
+        try:
+            shutil.rmtree(model_dir)
+            logger.info(f"Deleted classification model directory for {name}")
+        except Exception as e:
+            logger.debug(f"Failed to delete model directory for {name}: {e}")

     return JSONResponse(
         content=(
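Both cleanup blocks repeat the same best-effort pattern; a hypothetical helper, not part of the PR, showing the idea in isolation:

    import logging
    import os
    import shutil

    logger = logging.getLogger(__name__)

    def remove_dir_best_effort(path: str, label: str) -> None:
        """Delete a directory if present; log failures instead of raising.

        Keeps model deletion idempotent, so it succeeds even for models that
        were only partially created (e.g., a cancelled wizard run).
        """
        if not os.path.exists(path):
            return
        try:
            shutil.rmtree(path)
            logger.info(f"Deleted {label} at {path}")
        except Exception as e:
            logger.debug(f"Failed to delete {label} at {path}: {e}")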


@@ -177,6 +177,12 @@ class CameraConfig(FrigateBaseModel):
     def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
         return self._ffmpeg_cmds

+    def get_formatted_name(self) -> str:
+        """Return the friendly name if set, otherwise return a formatted version of the camera name."""
+        if self.friendly_name:
+            return self.friendly_name
+        return self.name.replace("_", " ").title() if self.name else ""
+
     def create_ffmpeg_cmds(self):
         if "_ffmpeg_cmds" in self:
             return
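A minimal illustration of the fallback behavior, with hypothetical config values and the other required CameraConfig fields elided:

    front = CameraConfig(name="front_door", friendly_name="Front Door Cam")
    side = CameraConfig(name="side_yard", friendly_name=None)

    assert front.get_formatted_name() == "Front Door Cam"  # friendly name wins
    assert side.get_formatted_name() == "Side Yard"  # falls back to title-cased name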


@@ -56,6 +56,12 @@ class ZoneConfig(BaseModel):
     def contour(self) -> np.ndarray:
         return self._contour

+    def get_formatted_name(self, zone_name: str) -> str:
+        """Return the friendly name if set, otherwise return a formatted version of the zone name."""
+        if self.friendly_name:
+            return self.friendly_name
+        return zone_name.replace("_", " ").title()
+
     @field_validator("objects", mode="before")
     @classmethod
     def validate_objects(cls, v):


@@ -4,7 +4,6 @@ import logging
 import os

 import sherpa_onnx
-from faster_whisper.utils import download_model

 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.const import MODEL_CACHE_DIR
@@ -25,6 +24,9 @@ class AudioTranscriptionModelRunner:
         if model_size == "large":
             # use the Whisper download function instead of our own
+            # Import dynamically to avoid crashes on systems without AVX support
+            from faster_whisper.utils import download_model
+
             logger.debug("Downloading Whisper audio transcription model")
             download_model(
                 size_or_id="small" if device == "cuda" else "tiny",


@@ -6,7 +6,6 @@ import threading
 import time
 from typing import Optional

-from faster_whisper import WhisperModel
 from peewee import DoesNotExist

 from frigate.comms.inter_process import InterProcessRequestor
@@ -51,6 +50,9 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
     def __build_recognizer(self) -> None:
         try:
+            # Import dynamically to avoid crashes on systems without AVX support
+            from faster_whisper import WhisperModel
+
             self.recognizer = WhisperModel(
                 model_size_or_path="small",
                 device="cuda"
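Both hunks apply the same fix: the faster_whisper import moves from module scope into the code path that uses it, so processes that never enable transcription never execute the AVX-dependent import. A standalone sketch of the pattern, with an illustrative function name:

    def build_recognizer(model_size: str = "small"):
        # Deferred import: on CPUs without AVX, importing faster_whisper can
        # abort the process with an illegal instruction, so the import must
        # not run at module load time for users who never enable the feature.
        from faster_whisper import WhisperModel

        return WhisperModel(model_size_or_path=model_size, device="cpu")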


@@ -16,6 +16,7 @@ from peewee import DoesNotExist
 from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import FrigateConfig
+from frigate.config.camera import CameraConfig
 from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum
 from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
 from frigate.data_processing.types import PostProcessDataEnum
@@ -30,6 +31,7 @@ from ..types import DataProcessorMetrics
 logger = logging.getLogger(__name__)

 RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
+MIN_RECORDING_DURATION = 10


 class ReviewDescriptionProcessor(PostProcessorApi):
@@ -130,7 +132,17 @@ class ReviewDescriptionProcessor(PostProcessorApi):
         if image_source == ImageSourceEnum.recordings:
             duration = final_data["end_time"] - final_data["start_time"]
-            buffer_extension = duration * RECORDING_BUFFER_EXTENSION_PERCENT
+            buffer_extension = min(
+                10, max(2, duration * RECORDING_BUFFER_EXTENSION_PERCENT)
+            )
+
+            # Ensure minimum total duration for short review items
+            # This provides better context for brief events
+            total_duration = duration + (2 * buffer_extension)
+            if total_duration < MIN_RECORDING_DURATION:
+                # Expand buffer to reach minimum duration, still respecting max of 10s per side
+                additional_buffer_per_side = (MIN_RECORDING_DURATION - duration) / 2
+                buffer_extension = min(10, additional_buffer_per_side)

             thumbs = self.get_recording_frames(
                 camera,
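As a sanity check on the clamping above, the same rule in plain Python with two worked cases, not part of the diff:

    RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
    MIN_RECORDING_DURATION = 10

    def buffer_for(duration: float) -> float:
        # 10% of the event duration, clamped to 2..10 seconds per side
        buffer_extension = min(10, max(2, duration * RECORDING_BUFFER_EXTENSION_PERCENT))
        # Short events are padded out to at least MIN_RECORDING_DURATION total
        if duration + 2 * buffer_extension < MIN_RECORDING_DURATION:
            buffer_extension = min(10, (MIN_RECORDING_DURATION - duration) / 2)
        return buffer_extension

    assert buffer_for(3) == 3.5  # 3s event -> 3.5s per side -> 10s total
    assert buffer_for(60) == 6.0  # 60s event -> clamped 10% rule: 6s per side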
@@ -182,7 +194,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
             self.requestor,
             self.genai_client,
             self.review_desc_speed,
-            camera,
+            camera_config,
             final_data,
             thumbs,
             camera_config.review.genai,
@@ -411,7 +423,7 @@ def run_analysis(
     requestor: InterProcessRequestor,
     genai_client: GenAIClient,
     review_inference_speed: InferenceSpeed,
-    camera: str,
+    camera_config: CameraConfig,
     final_data: dict[str, str],
     thumbs: list[bytes],
     genai_config: GenAIReviewConfig,
@@ -419,10 +431,19 @@ def run_analysis(
     attribute_labels: list[str],
 ) -> None:
     start = datetime.datetime.now().timestamp()
+
+    # Format zone names using zone config friendly names if available
+    formatted_zones = []
+    for zone_name in final_data["data"]["zones"]:
+        if zone_name in camera_config.zones:
+            formatted_zones.append(
+                camera_config.zones[zone_name].get_formatted_name(zone_name)
+            )
+
     analytics_data = {
         "id": final_data["id"],
-        "camera": camera,
-        "zones": final_data["data"]["zones"],
+        "camera": camera_config.get_formatted_name(),
+        "zones": formatted_zones,
         "start": datetime.datetime.fromtimestamp(final_data["start_time"]).strftime(
             "%A, %I:%M %p"
         ),


@@ -394,7 +394,11 @@ class OpenVINOModelRunner(BaseModelRunner):
             self.infer_request.set_input_tensor(input_index, input_tensor)

         # Run inference
-        self.infer_request.infer()
+        try:
+            self.infer_request.infer()
+        except Exception as e:
+            logger.error(f"Error during OpenVINO inference: {e}")
+            return []

         # Get all output tensors
         outputs = []


@@ -472,7 +472,7 @@ class Embeddings:
                     )
                     thumbnail_missing = True
             except DoesNotExist:
-                logger.warning(
+                logger.debug(
                     f"Event ID {trigger.data} for trigger {trigger_name} does not exist."
                 )
                 continue


@@ -51,8 +51,7 @@ class GenAIClient:
         def get_concern_prompt() -> str:
             if concerns:
                 concern_list = "\n - ".join(concerns)
-                return f"""
-- `other_concerns` (list of strings): Include a list of any of the following concerns that are occurring:
+                return f"""- `other_concerns` (list of strings): Include a list of any of the following concerns that are occurring:
 - {concern_list}"""
             else:
                 return ""
@@ -70,7 +69,7 @@ class GenAIClient:
             return "\n- (No objects detected)"

         context_prompt = f"""
-Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
+Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera.

 ## Normal Activity Patterns for This Property
@@ -110,7 +109,7 @@ Your response MUST be a flat JSON object with:
 - Frame 1 = earliest, Frame {len(thumbnails)} = latest
 - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
-- Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}
+- Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"}

 ## Objects in Scene


@@ -16,6 +16,7 @@
     "tooltip": {
       "trainingInProgress": "Model is currently training",
       "noNewImages": "No new images to train. Classify more images in the dataset first.",
+      "noChanges": "No changes to the dataset since last training.",
       "modelNotReady": "Model is not ready for training"
     },
     "toast": {
@@ -43,7 +44,9 @@
     },
     "deleteCategory": {
       "title": "Delete Class",
-      "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model."
+      "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model.",
+      "minClassesTitle": "Cannot Delete Class",
+      "minClassesDesc": "A classification model must have at least 2 classes. Add another class before deleting this one."
     },
     "deleteModel": {
       "title": "Delete Classification Model",


@@ -28,6 +28,7 @@ import {
   CustomClassificationModelConfig,
   FrigateConfig,
 } from "@/types/frigateConfig";
+import { ClassificationDatasetResponse } from "@/types/classification";
 import { getTranslatedLabel } from "@/utils/i18n";
 import { zodResolver } from "@hookform/resolvers/zod";
 import axios from "axios";
@@ -140,16 +141,19 @@ export default function ClassificationModelEditDialog({
   });

   // Fetch dataset to get current classes for state models
-  const { data: dataset } = useSWR<{
-    [id: string]: string[];
-  }>(isStateModel ? `classification/${model.name}/dataset` : null, {
-    revalidateOnFocus: false,
-  });
+  const { data: dataset } = useSWR<ClassificationDatasetResponse>(
+    isStateModel ? `classification/${model.name}/dataset` : null,
+    {
+      revalidateOnFocus: false,
+    },
+  );

   // Update form with classes from dataset when loaded
   useEffect(() => {
-    if (isStateModel && dataset) {
-      const classes = Object.keys(dataset).filter((key) => key !== "none");
+    if (isStateModel && dataset?.categories) {
+      const classes = Object.keys(dataset.categories).filter(
+        (key) => key !== "none",
+      );
       if (classes.length > 0) {
         (form as ReturnType<typeof useForm<StateFormData>>).setValue(
           "classes",


@@ -15,6 +15,7 @@ import Step3ChooseExamples, {
 } from "./wizard/Step3ChooseExamples";
 import { cn } from "@/lib/utils";
 import { isDesktop } from "react-device-detect";
+import axios from "axios";

 const OBJECT_STEPS = [
   "wizard.steps.nameAndDefine",
@@ -120,7 +121,18 @@ export default function ClassificationModelWizardDialog({
     dispatch({ type: "PREVIOUS_STEP" });
   };

-  const handleCancel = () => {
+  const handleCancel = async () => {
+    // Clean up any generated training images if we're cancelling from Step 3
+    if (wizardState.step1Data && wizardState.step3Data?.examplesGenerated) {
+      try {
+        await axios.delete(
+          `/classification/${wizardState.step1Data.modelName}`,
+        );
+      } catch (error) {
+        // Silently fail - user is already cancelling
+      }
+    }
     dispatch({ type: "RESET" });
     onClose();
   };


@@ -165,18 +165,15 @@ export default function Step3ChooseExamples({
     const isLastClass = currentClassIndex === allClasses.length - 1;

     if (isLastClass) {
-      // Assign remaining unclassified images
-      unknownImages.slice(0, 24).forEach((imageName) => {
-        if (!newClassifications[imageName]) {
-          // For state models with 2 classes, assign to the last class
-          // For object models, assign to "none"
-          if (step1Data.modelType === "state" && allClasses.length === 2) {
-            newClassifications[imageName] = allClasses[allClasses.length - 1];
-          } else {
-            newClassifications[imageName] = "none";
-          }
-        }
-      });
+      // For object models, assign remaining unclassified images to "none"
+      // For state models, this should never happen since we require all images to be classified
+      if (step1Data.modelType !== "state") {
+        unknownImages.slice(0, 24).forEach((imageName) => {
+          if (!newClassifications[imageName]) {
+            newClassifications[imageName] = "none";
+          }
+        });
+      }

       // All done, trigger training immediately
       setImageClassifications(newClassifications);
@@ -316,8 +313,15 @@ export default function Step3ChooseExamples({
       return images;
     }

-    return images.filter((img) => !imageClassifications[img]);
-  }, [unknownImages, imageClassifications]);
+    // If we're viewing a previous class (going back), show images for that class
+    // Otherwise show only unclassified images
+    const currentClassInView = allClasses[currentClassIndex];
+    return images.filter((img) => {
+      const imgClass = imageClassifications[img];
+      // Show if: unclassified OR classified with current class we're viewing
+      return !imgClass || imgClass === currentClassInView;
+    });
+  }, [unknownImages, imageClassifications, allClasses, currentClassIndex]);

   const allImagesClassified = useMemo(() => {
     return unclassifiedImages.length === 0;
@@ -326,15 +330,26 @@ export default function Step3ChooseExamples({
   // For state models on the last class, require all images to be classified
   const isLastClass = currentClassIndex === allClasses.length - 1;
   const canProceed = useMemo(() => {
-    if (
-      step1Data.modelType === "state" &&
-      isLastClass &&
-      !allImagesClassified
-    ) {
-      return false;
+    if (step1Data.modelType === "state" && isLastClass) {
+      // Check if all 24 images will be classified after current selections are applied
+      const totalImages = unknownImages.slice(0, 24).length;
+
+      // Count images that will be classified (either already classified or currently selected)
+      const allImages = unknownImages.slice(0, 24);
+      const willBeClassified = allImages.filter((img) => {
+        return imageClassifications[img] || selectedImages.has(img);
+      }).length;
+
+      return willBeClassified >= totalImages;
     }
     return true;
-  }, [step1Data.modelType, isLastClass, allImagesClassified]);
+  }, [
+    step1Data.modelType,
+    isLastClass,
+    unknownImages,
+    imageClassifications,
+    selectedImages,
+  ]);

   const handleBack = useCallback(() => {
     if (currentClassIndex > 0) {


@@ -12,13 +12,13 @@ export function ImageShadowOverlay({
     <>
       <div
         className={cn(
-          "pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl",
+          "pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent",
           upperClassName,
         )}
       />
       <div
         className={cn(
-          "pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl",
+          "pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent",
           lowerClassName,
         )}
       />


@@ -77,7 +77,10 @@ export default function BirdseyeLivePlayer({
       )}
       onClick={onClick}
     >
-      <ImageShadowOverlay />
+      <ImageShadowOverlay
+        upperClassName="md:rounded-2xl"
+        lowerClassName="md:rounded-2xl"
+      />
       <div className="size-full" ref={playerRef}>
         {player}
       </div>


@@ -331,7 +331,10 @@ export default function LivePlayer({
     >
       {cameraEnabled &&
         ((showStillWithoutActivity && !liveReady) || liveReady) && (
-          <ImageShadowOverlay />
+          <ImageShadowOverlay
+            upperClassName="md:rounded-2xl"
+            lowerClassName="md:rounded-2xl"
+          />
         )}
       {player}
       {cameraEnabled &&


@@ -1,4 +1,10 @@
-import React, { createContext, useContext, useState, useEffect } from "react";
+import React, {
+  createContext,
+  useContext,
+  useState,
+  useEffect,
+  useRef,
+} from "react";
 import { FrigateConfig } from "@/types/frigateConfig";
 import useSWR from "swr";
@@ -36,6 +42,23 @@ export function DetailStreamProvider({
     () => initialSelectedObjectIds ?? [],
   );

+  // When the parent provides a new initialSelectedObjectIds (for example
+  // when navigating between search results) update the selection so children
+  // like `ObjectTrackOverlay` receive the new ids immediately. We only
+  // perform this update when the incoming value actually changes.
+  useEffect(() => {
+    if (
+      initialSelectedObjectIds &&
+      (initialSelectedObjectIds.length !== selectedObjectIds.length ||
+        initialSelectedObjectIds.some((v, i) => selectedObjectIds[i] !== v))
+    ) {
+      setSelectedObjectIds(initialSelectedObjectIds);
+    }
+    // Intentionally include selectedObjectIds to compare previous value and
+    // avoid overwriting user interactions unless the incoming prop changed.
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [initialSelectedObjectIds]);
+
   const toggleObjectSelection = (id: string | undefined) => {
     if (id === undefined) {
       setSelectedObjectIds([]);
@@ -63,10 +86,33 @@ export function DetailStreamProvider({
     setAnnotationOffset(cfgOffset);
   }, [config, camera]);

-  // Clear selected objects when exiting detail mode or changing cameras
+  // Clear selected objects when exiting detail mode or when the camera
+  // changes for providers that are not initialized with an explicit
+  // `initialSelectedObjectIds` (e.g., the RecordingView). For providers
+  // that receive `initialSelectedObjectIds` (like SearchDetailDialog) we
+  // avoid clearing on camera change to prevent a race with children that
+  // immediately set selection when mounting.
+  const prevCameraRef = useRef<string | undefined>(undefined);
   useEffect(() => {
-    setSelectedObjectIds([]);
-  }, [isDetailMode, camera]);
+    // Always clear when leaving detail mode
+    if (!isDetailMode) {
+      setSelectedObjectIds([]);
+      prevCameraRef.current = camera;
+      return;
+    }
+
+    // If camera changed and the parent did not provide initialSelectedObjectIds,
+    // clear selection to preserve previous behavior.
+    if (
+      prevCameraRef.current !== undefined &&
+      prevCameraRef.current !== camera &&
+      initialSelectedObjectIds === undefined
+    ) {
+      setSelectedObjectIds([]);
+    }
+
+    prevCameraRef.current = camera;
+  }, [isDetailMode, camera, initialSelectedObjectIds]);

   const value: DetailStreamContextType = {
     selectedObjectIds,

@@ -20,3 +20,17 @@ export type ClassificationThreshold = {
   recognition: number;
   unknown: number;
 };
+
+export type ClassificationDatasetResponse = {
+  categories: {
+    [id: string]: string[];
+  };
+  training_metadata: {
+    has_trained: boolean;
+    last_training_date: string | null;
+    last_training_image_count: number;
+    current_image_count: number;
+    new_images_count: number;
+    dataset_changed: boolean;
+  } | null;
+};


@@ -11,6 +11,7 @@ import {
   CustomClassificationModelConfig,
   FrigateConfig,
 } from "@/types/frigateConfig";
+import { ClassificationDatasetResponse } from "@/types/classification";
 import { useCallback, useEffect, useMemo, useState } from "react";
 import { useTranslation } from "react-i18next";
 import { FaFolderPlus } from "react-icons/fa";
@@ -209,9 +210,10 @@ type ModelCardProps = {
 function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
   const { t } = useTranslation(["views/classificationModel"]);

-  const { data: dataset } = useSWR<{
-    [id: string]: string[];
-  }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false });
+  const { data: dataset } = useSWR<ClassificationDatasetResponse>(
+    `classification/${config.name}/dataset`,
+    { revalidateOnFocus: false },
+  );

   const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
   const [editDialogOpen, setEditDialogOpen] = useState(false);
@@ -260,20 +262,25 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
   }, []);

   const coverImage = useMemo(() => {
-    if (!dataset) {
+    if (!dataset || !dataset.categories) {
       return undefined;
     }

-    const keys = Object.keys(dataset).filter((key) => key != "none");
-    const selectedKey = keys[0];
+    const keys = Object.keys(dataset.categories).filter((key) => key != "none");

-    if (!dataset[selectedKey]) {
+    if (keys.length === 0) {
+      return undefined;
+    }
+
+    const selectedKey = keys[0];
+    const images = dataset.categories[selectedKey];
+
+    if (!images || images.length === 0) {
       return undefined;
     }

     return {
       name: selectedKey,
-      img: dataset[selectedKey][0],
+      img: images[0],
     };
   }, [dataset]);
@@ -317,11 +324,19 @@ function ModelCard({ config, onClick, onUpdate, onDelete }: ModelCardProps) {
         )}
         onClick={onClick}
       >
-        <img
-          className="size-full"
-          src={`${baseUrl}clips/${config.name}/dataset/${coverImage?.name}/${coverImage?.img}`}
-        />
-        <ImageShadowOverlay lowerClassName="h-[30%] z-0" />
+        {coverImage ? (
+          <>
+            <img
+              className="size-full"
+              src={`${baseUrl}clips/${config.name}/dataset/${coverImage.name}/${coverImage.img}`}
+            />
+            <ImageShadowOverlay lowerClassName="h-[30%] z-0" />
+          </>
+        ) : (
+          <div className="flex size-full items-center justify-center bg-background_alt">
+            <MdModelTraining className="size-16 text-muted-foreground" />
+          </div>
+        )}
         <div className="absolute bottom-2 left-3 text-lg text-white smart-capitalize">
           {config.name}
         </div>


@@ -59,7 +59,11 @@ import { useNavigate } from "react-router-dom";
 import { IoMdArrowRoundBack } from "react-icons/io";
 import TrainFilterDialog from "@/components/overlay/dialog/TrainFilterDialog";
 import useApiFilter from "@/hooks/use-api-filter";
-import { ClassificationItemData, TrainFilter } from "@/types/classification";
+import {
+  ClassificationDatasetResponse,
+  ClassificationItemData,
+  TrainFilter,
+} from "@/types/classification";
 import {
   ClassificationCard,
   GroupedClassificationCard,
@@ -118,16 +122,10 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
   const { data: trainImages, mutate: refreshTrain } = useSWR<string[]>(
     `classification/${model.name}/train`,
   );
-  const { data: datasetResponse, mutate: refreshDataset } = useSWR<{
-    categories: { [id: string]: string[] };
-    training_metadata: {
-      has_trained: boolean;
-      last_training_date: string | null;
-      last_training_image_count: number;
-      current_image_count: number;
-      new_images_count: number;
-    } | null;
-  }>(`classification/${model.name}/dataset`);
+  const { data: datasetResponse, mutate: refreshDataset } =
+    useSWR<ClassificationDatasetResponse>(
+      `classification/${model.name}/dataset`,
+    );

   const dataset = datasetResponse?.categories || {};
   const trainingMetadata = datasetResponse?.training_metadata;
@@ -264,10 +262,11 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
             );
           }

+          // Always refresh dataset to update the categories list
+          refreshDataset();
+
           if (pageToggle == "train") {
             refreshTrain();
-          } else {
-            refreshDataset();
           }
         }
       })
@@ -445,7 +444,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
               variant={modelState == "failed" ? "destructive" : "select"}
               disabled={
                 (modelState != "complete" && modelState != "failed") ||
-                (trainingMetadata?.new_images_count ?? 0) === 0
+                !trainingMetadata?.dataset_changed
               }
             >
               {modelState == "training" ? (
@@ -466,14 +465,14 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
               )}
             </Button>
           </TooltipTrigger>
-          {((trainingMetadata?.new_images_count ?? 0) === 0 ||
+          {(!trainingMetadata?.dataset_changed ||
             (modelState != "complete" && modelState != "failed")) && (
             <TooltipPortal>
               <TooltipContent>
                 {modelState == "training"
                   ? t("tooltip.trainingInProgress")
-                  : trainingMetadata?.new_images_count === 0
-                    ? t("tooltip.noNewImages")
+                  : !trainingMetadata?.dataset_changed
+                    ? t("tooltip.noChanges")
                     : t("tooltip.modelNotReady")}
               </TooltipContent>
             </TooltipPortal>
@@ -571,27 +570,44 @@ function LibrarySelector({
         >
           <DialogContent>
             <DialogHeader>
-              <DialogTitle>{t("deleteCategory.title")}</DialogTitle>
+              <DialogTitle>
+                {Object.keys(dataset).length <= 2
+                  ? t("deleteCategory.minClassesTitle")
+                  : t("deleteCategory.title")}
+              </DialogTitle>
               <DialogDescription>
-                {t("deleteCategory.desc", { name: confirmDelete })}
+                {Object.keys(dataset).length <= 2
+                  ? t("deleteCategory.minClassesDesc")
+                  : t("deleteCategory.desc", { name: confirmDelete })}
               </DialogDescription>
             </DialogHeader>
             <div className="flex justify-end gap-2">
-              <Button variant="outline" onClick={() => setConfirmDelete(null)}>
-                {t("button.cancel", { ns: "common" })}
-              </Button>
-              <Button
-                variant="destructive"
-                className="text-white"
-                onClick={() => {
-                  if (confirmDelete) {
-                    handleDeleteCategory(confirmDelete);
-                    setConfirmDelete(null);
-                  }
-                }}
-              >
-                {t("button.delete", { ns: "common" })}
-              </Button>
+              {Object.keys(dataset).length <= 2 ? (
+                <Button variant="outline" onClick={() => setConfirmDelete(null)}>
+                  {t("button.ok", { ns: "common" })}
+                </Button>
+              ) : (
+                <>
+                  <Button
+                    variant="outline"
+                    onClick={() => setConfirmDelete(null)}
+                  >
+                    {t("button.cancel", { ns: "common" })}
+                  </Button>
+                  <Button
+                    variant="destructive"
+                    className="text-white"
+                    onClick={() => {
+                      if (confirmDelete) {
+                        handleDeleteCategory(confirmDelete);
+                        setConfirmDelete(null);
+                      }
+                    }}
+                  >
+                    {t("button.delete", { ns: "common" })}
+                  </Button>
+                </>
+              )}
             </div>
           </DialogContent>
         </Dialog>