diff --git a/frigate/api/classification.py b/frigate/api/classification.py
index 87de52884..a2aec6898 100644
--- a/frigate/api/classification.py
+++ b/frigate/api/classification.py
@@ -595,9 +595,13 @@ def get_classification_dataset(name: str):
             "last_training_image_count": 0,
             "current_image_count": current_image_count,
             "new_images_count": current_image_count,
+            "dataset_changed": current_image_count > 0,
         }
     else:
         last_training_count = metadata.get("last_training_image_count", 0)
+        # Dataset has changed if count is different (either added or deleted images)
+        dataset_changed = current_image_count != last_training_count
+        # Only show positive count for new images (ignore deletions in the count display)
         new_images_count = max(0, current_image_count - last_training_count)
         training_metadata = {
             "has_trained": True,
@@ -605,6 +609,7 @@ def get_classification_dataset(name: str):
             "last_training_image_count": last_training_count,
             "current_image_count": current_image_count,
             "new_images_count": new_images_count,
+            "dataset_changed": dataset_changed,
         }

     return JSONResponse(
@@ -948,31 +953,29 @@ async def generate_object_examples(request: Request, body: GenerateObjectExample
     dependencies=[Depends(require_role(["admin"]))],
     summary="Delete a classification model",
     description="""Deletes a specific classification model and all its associated data.
-    The name must exist in the classification models. Returns a success message or an error if the name is invalid.""",
+    Works even if the model is not in the config (e.g., partially created during wizard).
+    Returns a success message.""",
 )
 def delete_classification_model(request: Request, name: str):
-    config: FrigateConfig = request.app.frigate_config
-
-    if name not in config.classification.custom:
-        return JSONResponse(
-            content=(
-                {
-                    "success": False,
-                    "message": f"{name} is not a known classification model.",
-                }
-            ),
-            status_code=404,
-        )
+    sanitized_name = sanitize_filename(name)

     # Delete the classification model's data directory in clips
-    data_dir = os.path.join(CLIPS_DIR, sanitize_filename(name))
+    data_dir = os.path.join(CLIPS_DIR, sanitized_name)
     if os.path.exists(data_dir):
-        shutil.rmtree(data_dir)
+        try:
+            shutil.rmtree(data_dir)
+            logger.info(f"Deleted classification data directory for {name}")
+        except Exception as e:
+            logger.debug(f"Failed to delete data directory for {name}: {e}")

     # Delete the classification model's files in model_cache
-    model_dir = os.path.join(MODEL_CACHE_DIR, sanitize_filename(name))
+    model_dir = os.path.join(MODEL_CACHE_DIR, sanitized_name)
     if os.path.exists(model_dir):
-        shutil.rmtree(model_dir)
+        try:
+            shutil.rmtree(model_dir)
+            logger.info(f"Deleted classification model directory for {name}")
+        except Exception as e:
+            logger.debug(f"Failed to delete model directory for {name}: {e}")

     return JSONResponse(
         content=(
diff --git a/frigate/data_processing/common/audio_transcription/model.py b/frigate/data_processing/common/audio_transcription/model.py
index 0fe5ddb5c..82472ad62 100644
--- a/frigate/data_processing/common/audio_transcription/model.py
+++ b/frigate/data_processing/common/audio_transcription/model.py
@@ -4,7 +4,6 @@ import logging
 import os

 import sherpa_onnx
-from faster_whisper.utils import download_model

 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.const import MODEL_CACHE_DIR
@@ -25,6 +24,9 @@ class AudioTranscriptionModelRunner:

         if model_size == "large":
             # use the Whisper download function instead of our own
+            # Import dynamically to avoid crashes on systems without AVX support
+            from faster_whisper.utils import download_model
+
             logger.debug("Downloading Whisper audio transcription model")
             download_model(
                 size_or_id="small" if device == "cuda" else "tiny",
diff --git a/frigate/data_processing/post/audio_transcription.py b/frigate/data_processing/post/audio_transcription.py
index 066287707..870c34068 100644
--- a/frigate/data_processing/post/audio_transcription.py
+++ b/frigate/data_processing/post/audio_transcription.py
@@ -6,7 +6,6 @@ import threading
 import time
 from typing import Optional

-from faster_whisper import WhisperModel
 from peewee import DoesNotExist

 from frigate.comms.inter_process import InterProcessRequestor
@@ -51,6 +50,9 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):

     def __build_recognizer(self) -> None:
         try:
+            # Import dynamically to avoid crashes on systems without AVX support
+            from faster_whisper import WhisperModel
+
             self.recognizer = WhisperModel(
                 model_size_or_path="small",
                 device="cuda"
diff --git a/frigate/detectors/detection_runners.py b/frigate/detectors/detection_runners.py
index 6eb3a32fc..80d4e0487 100644
--- a/frigate/detectors/detection_runners.py
+++ b/frigate/detectors/detection_runners.py
@@ -394,7 +394,11 @@ class OpenVINOModelRunner(BaseModelRunner):
             self.infer_request.set_input_tensor(input_index, input_tensor)

         # Run inference
-        self.infer_request.infer()
+        try:
+            self.infer_request.infer()
+        except Exception as e:
+            logger.error(f"Error during OpenVINO inference: {e}")
+            return []

         # Get all output tensors
         outputs = []
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 5689511a8..01d011ae2 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -472,7 +472,7 @@ class Embeddings:
                     )
                     thumbnail_missing = True
             except DoesNotExist:
-                logger.warning(
+                logger.debug(
                     f"Event ID {trigger.data} for trigger {trigger_name} does not exist."
                 )
                 continue
diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json
index 2bae0c0ce..f8aef1b8f 100644
--- a/web/public/locales/en/views/classificationModel.json
+++ b/web/public/locales/en/views/classificationModel.json
@@ -16,6 +16,7 @@
   "tooltip": {
     "trainingInProgress": "Model is currently training",
     "noNewImages": "No new images to train. Classify more images in the dataset first.",
+    "noChanges": "No changes to the dataset since last training.",
     "modelNotReady": "Model is not ready for training"
   },
   "toast": {
@@ -43,7 +44,9 @@
   },
   "deleteCategory": {
     "title": "Delete Class",
-    "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model."
+    "desc": "Are you sure you want to delete the class {{name}}? This will permanently delete all associated images and require re-training the model.",
+    "minClassesTitle": "Cannot Delete Class",
+    "minClassesDesc": "A classification model must have at least 2 classes. Add another class before deleting this one."
   },
   "deleteModel": {
     "title": "Delete Classification Model",
diff --git a/web/src/components/classification/ClassificationModelWizardDialog.tsx b/web/src/components/classification/ClassificationModelWizardDialog.tsx
index e67a95f89..06bf1f850 100644
--- a/web/src/components/classification/ClassificationModelWizardDialog.tsx
+++ b/web/src/components/classification/ClassificationModelWizardDialog.tsx
@@ -15,6 +15,7 @@ import Step3ChooseExamples, {
 } from "./wizard/Step3ChooseExamples";
 import { cn } from "@/lib/utils";
 import { isDesktop } from "react-device-detect";
+import axios from "axios";

 const OBJECT_STEPS = [
   "wizard.steps.nameAndDefine",
@@ -120,7 +121,18 @@ export default function ClassificationModelWizardDialog({
     dispatch({ type: "PREVIOUS_STEP" });
   };

-  const handleCancel = () => {
+  const handleCancel = async () => {
+    // Clean up any generated training images if we're cancelling from Step 3
+    if (wizardState.step1Data && wizardState.step3Data?.examplesGenerated) {
+      try {
+        await axios.delete(
+          `/classification/${wizardState.step1Data.modelName}`,
+        );
+      } catch (error) {
+        // Silently fail - user is already cancelling
+      }
+    }
+
     dispatch({ type: "RESET" });
     onClose();
   };
diff --git a/web/src/components/classification/wizard/Step3ChooseExamples.tsx b/web/src/components/classification/wizard/Step3ChooseExamples.tsx
index f638c01e3..e4c157526 100644
--- a/web/src/components/classification/wizard/Step3ChooseExamples.tsx
+++ b/web/src/components/classification/wizard/Step3ChooseExamples.tsx
@@ -165,18 +165,15 @@ export default function Step3ChooseExamples({
     const isLastClass = currentClassIndex === allClasses.length - 1;

     if (isLastClass) {
-      // Assign remaining unclassified images
-      unknownImages.slice(0, 24).forEach((imageName) => {
-        if (!newClassifications[imageName]) {
-          // For state models with 2 classes, assign to the last class
-          // For object models, assign to "none"
-          if (step1Data.modelType === "state" && allClasses.length === 2) {
-            newClassifications[imageName] = allClasses[allClasses.length - 1];
-          } else {
+      // For object models, assign remaining unclassified images to "none"
+      // For state models, this should never happen since we require all images to be classified
+      if (step1Data.modelType !== "state") {
+        unknownImages.slice(0, 24).forEach((imageName) => {
+          if (!newClassifications[imageName]) {
             newClassifications[imageName] = "none";
-          }
-        });
+          }
+        });
+      }

       // All done, trigger training immediately
       setImageClassifications(newClassifications);
@@ -316,8 +313,15 @@ export default function Step3ChooseExamples({
       return images;
     }

-    return images.filter((img) => !imageClassifications[img]);
-  }, [unknownImages, imageClassifications]);
+    // If we're viewing a previous class (going back), show images for that class
+    // Otherwise show only unclassified images
+    const currentClassInView = allClasses[currentClassIndex];
+    return images.filter((img) => {
+      const imgClass = imageClassifications[img];
+      // Show if: unclassified OR classified with current class we're viewing
+      return !imgClass || imgClass === currentClassInView;
+    });
+  }, [unknownImages, imageClassifications, allClasses, currentClassIndex]);

   const allImagesClassified = useMemo(() => {
     return unclassifiedImages.length === 0;
@@ -326,15 +330,26 @@ export default function Step3ChooseExamples({
   // For state models on the last class, require all images to be classified
   const isLastClass = currentClassIndex === allClasses.length - 1;
   const canProceed = useMemo(() => {
-    if (
-      step1Data.modelType === "state" &&
-      isLastClass &&
-      !allImagesClassified
-    ) {
-      return false;
+    if (step1Data.modelType === "state" && isLastClass) {
+      // Check if all 24 images will be classified after current selections are applied
+      const totalImages = unknownImages.slice(0, 24).length;
+
+      // Count images that will be classified (either already classified or currently selected)
+      const allImages = unknownImages.slice(0, 24);
+      const willBeClassified = allImages.filter((img) => {
+        return imageClassifications[img] || selectedImages.has(img);
+      }).length;
+
+      return willBeClassified >= totalImages;
     }
     return true;
-  }, [step1Data.modelType, isLastClass, allImagesClassified]);
+  }, [
+    step1Data.modelType,
+    isLastClass,
+    unknownImages,
+    imageClassifications,
+    selectedImages,
+  ]);

   const handleBack = useCallback(() => {
     if (currentClassIndex > 0) {
diff --git a/web/src/components/overlay/ImageShadowOverlay.tsx b/web/src/components/overlay/ImageShadowOverlay.tsx
index 85791eec1..4f822572d 100644
--- a/web/src/components/overlay/ImageShadowOverlay.tsx
+++ b/web/src/components/overlay/ImageShadowOverlay.tsx
@@ -12,13 +12,13 @@ export function ImageShadowOverlay({
     <>
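The body of the `ImageShadowOverlay` hunk above did not survive extraction; only the hunk header and the fragment opener remain. For orientation only, here is a hypothetical sketch of what a component of this name and shape (a same-size `-12,13 +12,13` edit inside a JSX fragment) typically looks like — the prop names, class names, and gradient heights are assumptions, not the PR's actual code:

```tsx
// Hypothetical sketch only — the real hunk body was lost in extraction.
// Prop names and Tailwind classes are assumptions, not the PR's code.
type ImageShadowOverlayProps = {
  className?: string;
};

export function ImageShadowOverlay({ className }: ImageShadowOverlayProps) {
  return (
    <>
      {/* top gradient keeps light-colored text readable over bright video */}
      <div
        className={`pointer-events-none absolute inset-x-0 top-0 h-[30%] bg-gradient-to-b from-black/60 to-transparent ${className ?? ""}`}
      />
      {/* bottom gradient anchors labels and controls */}
      <div className="pointer-events-none absolute inset-x-0 bottom-0 h-[20%] bg-gradient-to-t from-black/60 to-transparent" />
    </>
  );
}
```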
diff --git a/web/src/components/player/BirdseyeLivePlayer.tsx b/web/src/components/player/BirdseyeLivePlayer.tsx
index f94e9aca2..3dcd6afe7 100644
--- a/web/src/components/player/BirdseyeLivePlayer.tsx
+++ b/web/src/components/player/BirdseyeLivePlayer.tsx
@@ -77,7 +77,10 @@ export default function BirdseyeLivePlayer({
       )}
       onClick={onClick}
     >
-
+
       {player}
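The removed and added JSX in this hunk (and in the `LivePlayer` hunk below) was stripped down to bare `-`/`+` markers during extraction. Judging by the surviving context, both players swap inline shadow markup for the shared `ImageShadowOverlay` component; a hedged illustration of the call-site shape follows, with no claim about the actual props passed in the PR:

```tsx
// Illustrative call site only — the real JSX was lost in extraction.
import { ReactNode } from "react";
import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay";

function PlayerShell({
  player,
  onClick,
}: {
  player: ReactNode;
  onClick?: () => void;
}) {
  return (
    <div className="relative flex size-full" onClick={onClick}>
      {/* gradient shadows layered over the video surface */}
      <ImageShadowOverlay />
      {player}
    </div>
  );
}
```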
diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx
index 3e7dcde00..9500688f5 100644
--- a/web/src/components/player/LivePlayer.tsx
+++ b/web/src/components/player/LivePlayer.tsx
@@ -331,7 +331,10 @@ export default function LivePlayer({
     >
       {cameraEnabled &&
         ((showStillWithoutActivity && !liveReady) || liveReady) && (
-
+
         )}
       {player}
       {cameraEnabled &&
diff --git a/web/src/context/detail-stream-context.tsx b/web/src/context/detail-stream-context.tsx
index 57971f7ac..67c06f981 100644
--- a/web/src/context/detail-stream-context.tsx
+++ b/web/src/context/detail-stream-context.tsx
@@ -1,4 +1,10 @@
-import React, { createContext, useContext, useState, useEffect } from "react";
+import React, {
+  createContext,
+  useContext,
+  useState,
+  useEffect,
+  useRef,
+} from "react";
 import { FrigateConfig } from "@/types/frigateConfig";
 import useSWR from "swr";

@@ -36,6 +42,23 @@ export function DetailStreamProvider({
     () => initialSelectedObjectIds ?? [],
   );

+  // When the parent provides a new initialSelectedObjectIds (for example,
+  // when navigating between search results), update the selection so children
+  // like `ObjectTrackOverlay` receive the new ids immediately. We only
+  // perform this update when the incoming value actually changes.
+  useEffect(() => {
+    if (
+      initialSelectedObjectIds &&
+      (initialSelectedObjectIds.length !== selectedObjectIds.length ||
+        initialSelectedObjectIds.some((v, i) => selectedObjectIds[i] !== v))
+    ) {
+      setSelectedObjectIds(initialSelectedObjectIds);
+    }
+    // selectedObjectIds is intentionally read for comparison but left out of
+    // the deps, so user interactions don't re-trigger this effect.
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [initialSelectedObjectIds]);
+
   const toggleObjectSelection = (id: string | undefined) => {
     if (id === undefined) {
       setSelectedObjectIds([]);
@@ -63,10 +86,33 @@ export function DetailStreamProvider({
     setAnnotationOffset(cfgOffset);
   }, [config, camera]);

-  // Clear selected objects when exiting detail mode or changing cameras
+  // Clear selected objects when exiting detail mode or when the camera
+  // changes for providers that are not initialized with an explicit
+  // `initialSelectedObjectIds` (e.g., the RecordingView). For providers
+  // that receive `initialSelectedObjectIds` (like SearchDetailDialog) we
+  // avoid clearing on camera change to prevent a race with children that
+  // immediately set selection when mounting.
+  const prevCameraRef = useRef<string | undefined>(undefined);
   useEffect(() => {
-    setSelectedObjectIds([]);
-  }, [isDetailMode, camera]);
+    // Always clear when leaving detail mode
+    if (!isDetailMode) {
+      setSelectedObjectIds([]);
+      prevCameraRef.current = camera;
+      return;
+    }
+
+    // If camera changed and the parent did not provide initialSelectedObjectIds,
+    // clear selection to preserve previous behavior.
+    if (
+      prevCameraRef.current !== undefined &&
+      prevCameraRef.current !== camera &&
+      initialSelectedObjectIds === undefined
+    ) {
+      setSelectedObjectIds([]);
+    }
+
+    prevCameraRef.current = camera;
+  }, [isDetailMode, camera, initialSelectedObjectIds]);

   const value: DetailStreamContextType = {
     selectedObjectIds,
diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx
index 6a3e680f9..b0664534c 100644
--- a/web/src/views/classification/ModelTrainingView.tsx
+++ b/web/src/views/classification/ModelTrainingView.tsx
@@ -126,6 +126,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
       last_training_image_count: number;
       current_image_count: number;
       new_images_count: number;
+      dataset_changed: boolean;
     } | null;
   }>(`classification/${model.name}/dataset`);

@@ -264,10 +265,11 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
           );
         }

+        // Always refresh dataset to update the categories list
+        refreshDataset();
+
         if (pageToggle == "train") {
           refreshTrain();
-        } else {
-          refreshDataset();
         }
       }
     })
@@ -445,7 +447,7 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
           variant={modelState == "failed" ? "destructive" : "select"}
           disabled={
             (modelState != "complete" && modelState != "failed") ||
-            (trainingMetadata?.new_images_count ?? 0) === 0
+            !trainingMetadata?.dataset_changed
           }
         >
           {modelState == "training" ? (
@@ -466,14 +468,14 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) {
           )}

-        {((trainingMetadata?.new_images_count ?? 0) === 0 ||
+        {(!trainingMetadata?.dataset_changed ||
           (modelState != "complete" && modelState != "failed")) && (

             {modelState == "training"
               ? t("tooltip.trainingInProgress")
-              : trainingMetadata?.new_images_count === 0
-                ? t("tooltip.noNewImages")
+              : !trainingMetadata?.dataset_changed
+                ? t("tooltip.noChanges")
                 : t("tooltip.modelNotReady")}

@@ -571,27 +573,44 @@ function LibrarySelector({
         >

-          {t("deleteCategory.title")}
+
+            {Object.keys(dataset).length <= 2
+              ? t("deleteCategory.minClassesTitle")
+              : t("deleteCategory.title")}
+

-            {t("deleteCategory.desc", { name: confirmDelete })}
+            {Object.keys(dataset).length <= 2
+              ? t("deleteCategory.minClassesDesc")
+              : t("deleteCategory.desc", { name: confirmDelete })}
-
-
+          {Object.keys(dataset).length <= 2 ? (
+
+          ) : (
+            <>
+
+
+            </>
+          )}
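Taken together, the `ModelTrainingView` changes gate the Train button on `dataset_changed` instead of `new_images_count` (so deleting images also re-enables training) and block class deletion once only two classes remain. A condensed restatement of that gating logic — `canTrain` and `canDeleteClass` are illustrative helper names, not functions in the PR:

```tsx
// Condensed restatement of the gating rules added in this diff.
type TrainingMetadata = {
  has_trained: boolean;
  last_training_image_count: number;
  current_image_count: number;
  new_images_count: number;
  dataset_changed: boolean; // true when images were added OR deleted
};

function canTrain(
  meta: TrainingMetadata | null,
  modelState: string,
): boolean {
  // Training stays disabled while the model is busy or nothing changed.
  const ready = modelState === "complete" || modelState === "failed";
  return ready && !!meta?.dataset_changed;
}

function canDeleteClass(dataset: Record<string, unknown>): boolean {
  // A classification model must keep at least 2 classes.
  return Object.keys(dataset).length > 2;
}
```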