Compare commits

...

9 Commits

Author         SHA1        Message                                                            Date
Josh Hawkins   52b3e3a2f4  Merge c84bfd3ace into 99a363c047                                   2025-11-10 13:28:52 +00:00
Nicolas Mowen  c84bfd3ace  Improve deletion handling for classes                              2025-11-10 06:28:47 -07:00
Nicolas Mowen  e0389382f8  Remove images when wizard is cancelled                             2025-11-10 06:21:50 -07:00
Nicolas Mowen  8aad89a83a  Improve flow handling for classification state                     2025-11-10 06:21:44 -07:00
Nicolas Mowen  55dcbc6371  Adjust default rounded corners on larger screens                   2025-11-10 05:57:30 -07:00
Nicolas Mowen  b8216d0536  Handle case where classification images are deleted                2025-11-10 05:51:09 -07:00
Josh Hawkins   c0f1fa1f61  Fix race condition in detail stream context                        2025-11-10 06:50:32 -06:00
                           (navigating between tracked objects in Explore would sometimes
                           prevent the object track from appearing)
Nicolas Mowen  c3f242dc53  Catch OpenVINO runtime error                                       2025-11-10 05:31:04 -07:00
Nicolas Mowen  6ee5f246aa  Import faster_whisper conditionally to avoid illegal instruction  2025-11-10 05:26:14 -07:00
12 changed files with 184 additions and 72 deletions

View File

@@ -595,9 +595,13 @@ def get_classification_dataset(name: str):
             "last_training_image_count": 0,
             "current_image_count": current_image_count,
             "new_images_count": current_image_count,
+            "dataset_changed": current_image_count > 0,
         }
     else:
         last_training_count = metadata.get("last_training_image_count", 0)
+        # Dataset has changed if count is different (either added or deleted images)
+        dataset_changed = current_image_count != last_training_count
+        # Only show positive count for new images (ignore deletions in the count display)
         new_images_count = max(0, current_image_count - last_training_count)
         training_metadata = {
             "has_trained": True,
@@ -605,6 +609,7 @@ def get_classification_dataset(name: str):
             "last_training_image_count": last_training_count,
             "current_image_count": current_image_count,
             "new_images_count": new_images_count,
+            "dataset_changed": dataset_changed,
         }

     return JSONResponse(
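
A quick worked example of the two fields above, with illustrative values: deletions now re-enable training via dataset_changed even though the displayed new-image count stays at zero.

    last_training_count = 10  # image count at the last training run
    current_image_count = 8   # two images were deleted since then

    # Any difference (additions or deletions) marks the dataset as changed
    dataset_changed = current_image_count != last_training_count  # True
    # The displayed count of new images is clamped to zero, so deletions never show as negative
    new_images_count = max(0, current_image_count - last_training_count)  # 0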
@@ -948,31 +953,29 @@ async def generate_object_examples(request: Request, body: GenerateObjectExample
     dependencies=[Depends(require_role(["admin"]))],
     summary="Delete a classification model",
     description="""Deletes a specific classification model and all its associated data.
-    The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
+    Works even if the model is not in the config (e.g., partially created during wizard).
+    Returns a success message.""",
 )
 def delete_classification_model(request: Request, name: str):
-    config: FrigateConfig = request.app.frigate_config
-
-    if name not in config.classification.custom:
-        return JSONResponse(
-            content=(
-                {
-                    "success": False,
-                    "message": f"{name} is not a known classification model.",
-                }
-            ),
-            status_code=404,
-        )
+    sanitized_name = sanitize_filename(name)

     # Delete the classification model's data directory in clips
-    data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    data_dir = os.path.join(CLIPS_DIR, sanitized_name)
     if os.path.exists(data_dir):
-        shutil.rmtree(data_dir)
+        try:
+            shutil.rmtree(data_dir)
+            logger.info(f"Deleted classification data directory for {name}")
+        except Exception as e:
+            logger.debug(f"Failed to delete data directory for {name}: {e}")

     # Delete the classification model's files in model_cache
-    model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
+    model_dir = os.path.join(MODEL_CACHE_DIR, sanitized_name)
     if os.path.exists(model_dir):
-        shutil.rmtree(model_dir)
+        try:
+            shutil.rmtree(model_dir)
+            logger.info(f"Deleted classification model directory for {name}")
+        except Exception as e:
+            logger.debug(f"Failed to delete model directory for {name}: {e}")

     return JSONResponse(
         content=(
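
Both directory removals above follow the same best-effort pattern: check for existence, attempt removal, and log failures instead of raising, so deletion also succeeds for models that were only partially created by the wizard. A minimal sketch of that pattern as a shared helper (the helper itself is hypothetical, not part of this change):

    def _best_effort_rmtree(path: str, label: str) -> None:
        # Mirrors the endpoint above: a missing directory is skipped, and
        # removal errors are logged at debug level rather than failing the request.
        if os.path.exists(path):
            try:
                shutil.rmtree(path)
                logger.info(f"Deleted {label}")
            except Exception as e:
                logger.debug(f"Failed to delete {label}: {e}")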

View File

@@ -4,7 +4,6 @@ import logging
 import os

 import sherpa_onnx
-from faster_whisper.utils import download_model

 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.const import MODEL_CACHE_DIR
@@ -25,6 +24,9 @@ class AudioTranscriptionModelRunner:

         if model_size == "large":
             # use the Whisper download function instead of our own
+            # Import dynamically to avoid crashes on systems without AVX support
+            from faster_whisper.utils import download_model
+
             logger.debug("Downloading Whisper audio transcription model")
             download_model(
                 size_or_id="small" if device == "cuda" else "tiny",

View File

@@ -6,7 +6,6 @@ import threading
 import time
 from typing import Optional

-from faster_whisper import WhisperModel
 from peewee import DoesNotExist

 from frigate.comms.inter_process import InterProcessRequestor
@@ -51,6 +50,9 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):

     def __build_recognizer(self) -> None:
         try:
+            # Import dynamically to avoid crashes on systems without AVX support
+            from faster_whisper import WhisperModel
+
             self.recognizer = WhisperModel(
                 model_size_or_path="small",
                 device="cuda"

View File

@@ -394,7 +394,11 @@ class OpenVINOModelRunner(BaseModelRunner):
             self.infer_request.set_input_tensor(input_index, input_tensor)

         # Run inference
-        self.infer_request.infer()
+        try:
+            self.infer_request.infer()
+        except Exception as e:
+            logger.error(f"Error during OpenVINO inference: {e}")
+            return []

         # Get all output tensors
         outputs = []

View File

@@ -16,6 +16,7 @@
   "tooltip": {
     "trainingInProgress": "Model is currently training",
     "noNewImages": "No new images to train. Classify more images in the dataset first.",
+    "noChanges": "No changes to the dataset since last training.",
     "modelNotReady": "Model is not ready for training"
   },
   "toast": {
@@ -43,7 +44,9 @@
   },
   "deleteCategory": {
     "title": "Delete Class",
-    "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model."
+    "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model.",
+    "minClassesTitle": "Cannot Delete Class",
+    "minClassesDesc": "A classification model must have at least 2 classes. Add another class before deleting this one."
   },
   "deleteModel": {
     "title": "Delete Classification Model",

View File

@@ -15,6 +15,7 @@ import Step3ChooseExamples, {
 } from "./wizard/Step3ChooseExamples";
 import { cn } from "@/lib/utils";
 import { isDesktop } from "react-device-detect";
+import axios from "axios";

 const OBJECT_STEPS = [
   "wizard.steps.nameAndDefine",
@@ -120,7 +121,18 @@ export default function ClassificationModelWizardDialog({
     dispatch({ type: "PREVIOUS_STEP" });
   };

-  const handleCancel = () => {
+  const handleCancel = async () => {
+    // Clean up any generated training images if we're cancelling from Step 3
+    if (wizardState.step1Data && wizardState.step3Data?.examplesGenerated) {
+      try {
+        await axios.delete(
+          `/classification/${wizardState.step1Data.modelName}`,
+        );
+      } catch (error) {
+        // Silently fail - user is already cancelling
+      }
+    }
+
     dispatch({ type: "RESET" });
     onClose();
   };

View File

@@ -165,18 +165,15 @@ export default function Step3ChooseExamples({
     const isLastClass = currentClassIndex === allClasses.length - 1;

     if (isLastClass) {
-      // Assign remaining unclassified images
-      unknownImages.slice(0, 24).forEach((imageName) => {
-        if (!newClassifications[imageName]) {
-          // For state models with 2 classes, assign to the last class
-          // For object models, assign to "none"
-          if (step1Data.modelType === "state" && allClasses.length === 2) {
-            newClassifications[imageName] = allClasses[allClasses.length - 1];
-          } else {
+      // For object models, assign remaining unclassified images to "none"
+      // For state models, this should never happen since we require all images to be classified
+      if (step1Data.modelType !== "state") {
+        unknownImages.slice(0, 24).forEach((imageName) => {
+          if (!newClassifications[imageName]) {
             newClassifications[imageName] = "none";
           }
-        }
-      });
+        });
+      }

       // All done, trigger training immediately
       setImageClassifications(newClassifications);
@@ -316,8 +313,15 @@
       return images;
     }

-    return images.filter((img) => !imageClassifications[img]);
-  }, [unknownImages, imageClassifications]);
+    // If we're viewing a previous class (going back), show images for that class
+    // Otherwise show only unclassified images
+    const currentClassInView = allClasses[currentClassIndex];
+    return images.filter((img) => {
+      const imgClass = imageClassifications[img];
+      // Show if: unclassified OR classified with current class we're viewing
+      return !imgClass || imgClass === currentClassInView;
+    });
+  }, [unknownImages, imageClassifications, allClasses, currentClassIndex]);

   const allImagesClassified = useMemo(() => {
     return unclassifiedImages.length === 0;
@@ -326,15 +330,26 @@
   // For state models on the last class, require all images to be classified
   const isLastClass = currentClassIndex === allClasses.length - 1;
   const canProceed = useMemo(() => {
-    if (
-      step1Data.modelType === "state" &&
-      isLastClass &&
-      !allImagesClassified
-    ) {
-      return false;
+    if (step1Data.modelType === "state" && isLastClass) {
+      // Check if all 24 images will be classified after current selections are applied
+      const totalImages = unknownImages.slice(0, 24).length;
+
+      // Count images that will be classified (either already classified or currently selected)
+      const allImages = unknownImages.slice(0, 24);
+      const willBeClassified = allImages.filter((img) => {
+        return imageClassifications[img] || selectedImages.has(img);
+      }).length;
+
+      return willBeClassified >= totalImages;
     }
     return true;
-  }, [step1Data.modelType, isLastClass, allImagesClassified]);
+  }, [
+    step1Data.modelType,
+    isLastClass,
+    unknownImages,
+    imageClassifications,
+    selectedImages,
+  ]);

   const handleBack = useCallback(() => {
     if (currentClassIndex > 0) {

View File

@@ -12,13 +12,13 @@ export function ImageShadowOverlay({
     <>
       <div
         className={cn(
-          "pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl",
+          "pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent",
           upperClassName,
         )}
       />
       <div
         className={cn(
-          "pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl",
+          "pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent",
           lowerClassName,
         )}
       />

View File

@@ -77,7 +77,10 @@ export default function BirdseyeLivePlayer({
       )}
       onClick={onClick}
     >
-      <ImageShadowOverlay />
+      <ImageShadowOverlay
+        upperClassName="md:rounded-2xl"
+        lowerClassName="md:rounded-2xl"
+      />
       <div className="size-full" ref={playerRef}>
         {player}
       </div>

View File

@@ -331,7 +331,10 @@ export default function LivePlayer({
     >
       {cameraEnabled &&
         ((showStillWithoutActivity && !liveReady) || liveReady) && (
-          <ImageShadowOverlay />
+          <ImageShadowOverlay
+            upperClassName="md:rounded-2xl"
+            lowerClassName="md:rounded-2xl"
+          />
         )}
       {player}
       {cameraEnabled &&

View File

@@ -1,4 +1,10 @@
-import React, { createContext, useContext, useState, useEffect } from "react";
+import React, {
+  createContext,
+  useContext,
+  useState,
+  useEffect,
+  useRef,
+} from "react";
 import { FrigateConfig } from "@/types/frigateConfig";
 import useSWR from "swr";

@@ -36,6 +42,23 @@ export function DetailStreamProvider({
     () => initialSelectedObjectIds ?? [],
   );

+  // When the parent provides a new initialSelectedObjectIds (for example
+  // when navigating between search results) update the selection so children
+  // like `ObjectTrackOverlay` receive the new ids immediately. We only
+  // perform this update when the incoming value actually changes.
+  useEffect(() => {
+    if (
+      initialSelectedObjectIds &&
+      (initialSelectedObjectIds.length !== selectedObjectIds.length ||
+        initialSelectedObjectIds.some((v, i) => selectedObjectIds[i] !== v))
+    ) {
+      setSelectedObjectIds(initialSelectedObjectIds);
+    }
+    // Intentionally include selectedObjectIds to compare previous value and
+    // avoid overwriting user interactions unless the incoming prop changed.
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [initialSelectedObjectIds]);
+
   const toggleObjectSelection = (id: string | undefined) => {
     if (id === undefined) {
       setSelectedObjectIds([]);
@@ -63,10 +86,33 @@
     setAnnotationOffset(cfgOffset);
   }, [config, camera]);

-  // Clear selected objects when exiting detail mode or changing cameras
+  // Clear selected objects when exiting detail mode or when the camera
+  // changes for providers that are not initialized with an explicit
+  // `initialSelectedObjectIds` (e.g., the RecordingView). For providers
+  // that receive `initialSelectedObjectIds` (like SearchDetailDialog) we
+  // avoid clearing on camera change to prevent a race with children that
+  // immediately set selection when mounting.
+  const prevCameraRef = useRef<string | undefined>(undefined);
   useEffect(() => {
-    setSelectedObjectIds([]);
-  }, [isDetailMode, camera]);
+    // Always clear when leaving detail mode
+    if (!isDetailMode) {
+      setSelectedObjectIds([]);
+      prevCameraRef.current = camera;
+      return;
+    }
+
+    // If camera changed and the parent did not provide initialSelectedObjectIds,
+    // clear selection to preserve previous behavior.
+    if (
+      prevCameraRef.current !== undefined &&
+      prevCameraRef.current !== camera &&
+      initialSelectedObjectIds === undefined
+    ) {
+      setSelectedObjectIds([]);
+    }
+
+    prevCameraRef.current = camera;
+  }, [isDetailMode, camera, initialSelectedObjectIds]);

   const value: DetailStreamContextType = {
     selectedObjectIds,

View File

@@ -126,6 +126,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
     last_training_image_count: number;
     current_image_count: number;
     new_images_count: number;
+    dataset_changed: boolean;
   } | null;
 }>(`classification/${model.name}/dataset`);

@@ -264,10 +265,11 @@
           );
         }

+        // Always refresh dataset to update the categories list
+        refreshDataset();
+
         if (pageToggle == "train") {
           refreshTrain();
-        } else {
-          refreshDataset();
         }
       }
     })
@@ -445,7 +447,7 @@
                 variant={modelState == "failed" ? "destructive" : "select"}
                 disabled={
                   (modelState != "complete" && modelState != "failed") ||
-                  (trainingMetadata?.new_images_count ?? 0) === 0
+                  !trainingMetadata?.dataset_changed
                 }
               >
                 {modelState == "training" ? (
@@ -466,14 +468,14 @@
               )}
             </Button>
           </TooltipTrigger>
-          {((trainingMetadata?.new_images_count ?? 0) === 0 ||
+          {(!trainingMetadata?.dataset_changed ||
             (modelState != "complete" && modelState != "failed")) && (
             <TooltipPortal>
               <TooltipContent>
                 {modelState == "training"
                   ? t("tooltip.trainingInProgress")
-                  : trainingMetadata?.new_images_count === 0
-                    ? t("tooltip.noNewImages")
+                  : !trainingMetadata?.dataset_changed
+                    ? t("tooltip.noChanges")
                     : t("tooltip.modelNotReady")}
               </TooltipContent>
             </TooltipPortal>
@@ -571,27 +573,44 @@ function LibrarySelector({
       >
         <DialogContent>
           <DialogHeader>
-            <DialogTitle>{t("deleteCategory.title")}</DialogTitle>
+            <DialogTitle>
+              {Object.keys(dataset).length <= 2
+                ? t("deleteCategory.minClassesTitle")
+                : t("deleteCategory.title")}
+            </DialogTitle>
             <DialogDescription>
-              {t("deleteCategory.desc", { name: confirmDelete })}
+              {Object.keys(dataset).length <= 2
+                ? t("deleteCategory.minClassesDesc")
+                : t("deleteCategory.desc", { name: confirmDelete })}
             </DialogDescription>
           </DialogHeader>
           <div className="flex justify-end gap-2">
-            <Button variant="outline" onClick={() => setConfirmDelete(null)}>
-              {t("button.cancel", { ns: "common" })}
-            </Button>
-            <Button
-              variant="destructive"
-              className="text-white"
-              onClick={() => {
-                if (confirmDelete) {
-                  handleDeleteCategory(confirmDelete);
-                  setConfirmDelete(null);
-                }
-              }}
-            >
-              {t("button.delete", { ns: "common" })}
-            </Button>
+            {Object.keys(dataset).length <= 2 ? (
+              <Button variant="outline" onClick={() => setConfirmDelete(null)}>
+                {t("button.ok", { ns: "common" })}
+              </Button>
+            ) : (
+              <>
+                <Button
+                  variant="outline"
+                  onClick={() => setConfirmDelete(null)}
+                >
+                  {t("button.cancel", { ns: "common" })}
+                </Button>
+                <Button
+                  variant="destructive"
+                  className="text-white"
+                  onClick={() => {
+                    if (confirmDelete) {
+                      handleDeleteCategory(confirmDelete);
+                      setConfirmDelete(null);
+                    }
+                  }}
+                >
+                  {t("button.delete", { ns: "common" })}
+                </Button>
+              </>
+            )}
           </div>
         </DialogContent>
       </Dialog>