Compare commits

...

18 Commits

Author SHA1 Message Date
Nicolas Mowen
bffdddb1a6 Lazy loading 2025-10-23 13:15:35 -06:00
Nicolas Mowen
5523e6afb9 remove underline 2025-10-23 13:03:46 -06:00
Nicolas Mowen
e3512c20e0 Cleanup 2025-10-23 12:35:31 -06:00
Nicolas Mowen
9626b8c3af Add cursor rule for frontend 2025-10-23 12:19:55 -06:00
Nicolas Mowen
b430ef5d86 Cleanup dialog sizing 2025-10-23 12:15:09 -06:00
Nicolas Mowen
5d532f5d79 Add tips and more info 2025-10-23 12:08:07 -06:00
Nicolas Mowen
da47f23898 Small tweaks 2025-10-23 11:38:41 -06:00
Nicolas Mowen
83045849c0 Cleanup sizing and more font colors 2025-10-23 11:35:00 -06:00
Nicolas Mowen
ff9308a0e8 Start with correct type selected 2025-10-23 11:25:21 -06:00
Nicolas Mowen
46cf4eefc3 Adjust form label 2025-10-23 11:23:16 -06:00
Nicolas Mowen
887a1b480d Adjust plus icon 2025-10-23 11:19:25 -06:00
Nicolas Mowen
0d744757b0 Make no models view more specific 2025-10-23 11:03:52 -06:00
Nicolas Mowen
b3b990b636 Add retry button for image generation 2025-10-23 10:29:51 -06:00
Nicolas Mowen
daab99e692 Adjust wording 2025-10-23 10:24:39 -06:00
Nicolas Mowen
b35be40933 Remove unused translation keys 2025-10-23 08:44:26 -06:00
Nicolas Mowen
0a569fa3c0 Improve image selection mechanism 2025-10-23 08:40:09 -06:00
Nicolas Mowen
855021dfc4 Add loading when hitting continue 2025-10-23 07:31:31 -06:00
Nicolas Mowen
82c236f349 Dynamically add metrics for new model 2025-10-23 07:28:58 -06:00
14 changed files with 474 additions and 230 deletions

View File

@ -0,0 +1,6 @@
---
globs: ["**/*.ts", "**/*.tsx"]
alwaysApply: false
---
Never write strings in the frontend directly; always write to and reference the relevant translations file.

View File

@ -12,7 +12,18 @@ Object classification models are lightweight and run very fast on CPU. Inference
Training the model does briefly use a high amount of system resources for about 13 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
### Sub label vs Attribute
## Classes
Classes are the categories your model will learn to distinguish between. Each class represents a distinct visual category that the model will predict.
For object classification:
- Define classes that represent different types or attributes of the detected object
- Examples: For `person` objects, classes might be `delivery_person`, `resident`, `stranger`
- Include a `none` class for objects that don't fit any specific category
- Keep classes visually distinct to improve accuracy
### Classification Type
- **Sub label**:

View File

@ -12,6 +12,17 @@ State classification models are lightweight and run very fast on CPU. Inference
Training the model does briefly use a high amount of system resources for about 13 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
## Classes
Classes are the different states an area on your camera can be in. Each class represents a distinct visual state that the model will learn to recognize.
For state classification:
- Define classes that represent mutually exclusive states
- Examples: `open` and `closed` for a garage door, `on` and `off` for lights
- Use at least 2 classes (typically binary states work best)
- Keep class names clear and descriptive
## Example use cases
- **Door state**: Detect if a garage or front door is open vs closed.

View File

@ -167,8 +167,7 @@ def train_face(request: Request, name: str, body: dict = None):
new_name = f"{sanitized_name}-{datetime.datetime.now().timestamp()}.webp"
new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}")
if not os.path.exists(new_file_folder):
os.mkdir(new_file_folder)
os.makedirs(new_file_folder, exist_ok=True)
if training_file_name:
shutil.move(training_file, os.path.join(new_file_folder, new_name))
@ -716,8 +715,7 @@ def categorize_classification_image(request: Request, name: str, body: dict = No
CLIPS_DIR, sanitize_filename(name), "dataset", category
)
if not os.path.exists(new_file_folder):
os.mkdir(new_file_folder)
os.makedirs(new_file_folder, exist_ok=True)
# use opencv because webp images can not be used to train
img = cv2.imread(training_file)

View File

@ -53,9 +53,17 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
self.tensor_output_details: dict[str, Any] | None = None
self.labelmap: dict[int, str] = {}
self.classifications_per_second = EventsPerSecond()
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
if (
self.metrics
and self.model_config.name in self.metrics.classification_speeds
):
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
else:
self.inference_speed = None
self.last_run = datetime.datetime.now().timestamp()
self.__build_detector()
@ -83,12 +91,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
def __update_metrics(self, duration: float) -> None:
self.classifications_per_second.update()
self.inference_speed.update(duration)
if self.inference_speed:
self.inference_speed.update(duration)
def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray):
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
if self.metrics and self.model_config.name in self.metrics.classification_cps:
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
camera = frame_data.get("camera")
if camera not in self.model_config.state_config.cameras:
@ -223,9 +233,17 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
self.detected_objects: dict[str, float] = {}
self.labelmap: dict[int, str] = {}
self.classifications_per_second = EventsPerSecond()
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
if (
self.metrics
and self.model_config.name in self.metrics.classification_speeds
):
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
else:
self.inference_speed = None
self.__build_detector()
@redirect_output_to_logger(logger, logging.DEBUG)
@ -251,12 +269,14 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
def __update_metrics(self, duration: float) -> None:
self.classifications_per_second.update()
self.inference_speed.update(duration)
if self.inference_speed:
self.inference_speed.update(duration)
def process_frame(self, obj_data, frame):
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
if self.metrics and self.model_config.name in self.metrics.classification_cps:
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
if obj_data["false_positive"]:
return

View File

@ -1,4 +1,5 @@
{
"documentTitle": "Classification Models",
"button": {
"deleteClassificationAttempts": "Delete Classification Images",
"renameCategory": "Rename Class",
@ -51,20 +52,26 @@
"categorizeImageAs": "Classify Image As:",
"categorizeImage": "Classify Image",
"noModels": {
"title": "No Classification Models",
"description": "Create a custom model to classify objects or monitor state changes in your cameras.",
"buttonText": "Create Classification Model"
"object": {
"title": "No Object Classification Models",
"description": "Create a custom model to classify detected objects.",
"buttonText": "Create Object Model"
},
"state": {
"title": "No State Classification Models",
"description": "Create a custom model to monitor and classify state changes in specific camera areas.",
"buttonText": "Create State Model"
}
},
"wizard": {
"title": "Create New Classification",
"steps": {
"nameAndDefine": "Name & Define",
"stateArea": "State Area",
"chooseExamples": "Choose Examples",
"train": "Train"
"chooseExamples": "Choose Examples"
},
"step1": {
"description": "Create a new state or object classification model.",
"description": "State models monitor fixed camera areas for changes (e.g., door open/closed). Object models add classifications to detected objects (e.g., known animals, delivery persons, etc.).",
"name": "Name",
"namePlaceholder": "Enter model name...",
"type": "Type",
@ -73,9 +80,14 @@
"objectLabel": "Object Label",
"objectLabelPlaceholder": "Select object type...",
"classificationType": "Classification Type",
"classificationTypeTip": "Learn about classification types",
"classificationTypeDesc": "Sub Labels add additional text to the object label (e.g., 'Person: UPS'). Attributes are searchable metadata stored separately in the object metadata.",
"classificationSubLabel": "Sub Label",
"classificationAttribute": "Attribute",
"classes": "Classes",
"classesTip": "Learn about classes",
"classesStateDesc": "Define the different states your camera area can be in. For example: 'open' and 'closed' for a garage door.",
"classesObjectDesc": "Define the different categories to classify detected objects into. For example: 'delivery_person', 'resident', 'stranger' for person classification.",
"classPlaceholder": "Enter class name...",
"errors": {
"nameRequired": "Model name is required",
@ -96,18 +108,17 @@
"selectCameraPrompt": "Select a camera from the list to define its monitoring area"
},
"step3": {
"description": "Classify the example images below. These samples will be used to train your model.",
"selectImagesPrompt": "Select all images with: {{className}}",
"selectImagesDescription": "Click on images to select them. Click Continue when you're done with this class.",
"generating": {
"title": "Generating Sample Images",
"description": "We're pulling representative images from your recordings. This may take a moment..."
"description": "Frigate is pulling representative images from your recordings. This may take a moment..."
},
"training": {
"title": "Training Model",
"description": "Your model is being trained in the background. You can close this wizard and the training will continue."
"description": "Your model is being trained in the background. Close this dialog, and your model will start running as soon as training is complete."
},
"retryGenerate": "Retry Generation",
"selectClass": "Select class...",
"none": "None",
"noImages": "No sample images generated",
"classifying": "Classifying & Training...",
"trainingStarted": "Training started successfully",

View File

@ -5,10 +5,6 @@
"invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
},
"details": {
"subLabelScore": "Sub Label Score",
"scoreInfo": "The sub label score is the weighted score for all of the recognized face confidences, so this may differ from the score shown on the snapshot.",
"face": "Face Details",
"faceDesc": "Details of the tracked object that generated this face",
"timestamp": "Timestamp",
"unknown": "Unknown"
},
@ -19,8 +15,6 @@
},
"collections": "Collections",
"createFaceLibrary": {
"title": "Create Collection",
"desc": "Create a new collection",
"new": "Create New Face",
"nextSteps": "To build a strong foundation:<li>Use the Recent Recognitions tab to select and train on images for each detected person.</li><li>Focus on straight-on images for best results; avoid training images that capture faces at an angle.</li></ul>"
},
@ -37,8 +31,6 @@
"aria": "Select recent recognitions",
"empty": "There are no recent face recognition attempts"
},
"selectItem": "Select {{item}}",
"selectFace": "Select Face",
"deleteFaceLibrary": {
"title": "Delete Name",
"desc": "Are you sure you want to delete the collection {{name}}? This will permanently delete all associated faces."
@ -69,7 +61,6 @@
"maxSize": "Max size: {{size}}MB"
},
"nofaces": "No faces available",
"pixels": "{{area}}px",
"trainFaceAs": "Train Face as:",
"trainFace": "Train Face",
"toast": {

View File

@ -126,6 +126,7 @@ export const ClassificationCard = forwardRef<
imgClassName,
isMobile && "w-full",
)}
loading="lazy"
onLoad={() => setImageLoaded(true)}
src={`${baseUrl}${data.filepath}`}
/>

View File

@ -30,6 +30,7 @@ const STATE_STEPS = [
type ClassificationModelWizardDialogProps = {
open: boolean;
onClose: () => void;
defaultModelType?: "state" | "object";
};
type WizardState = {
@ -92,6 +93,7 @@ function wizardReducer(state: WizardState, action: WizardAction): WizardState {
export default function ClassificationModelWizardDialog({
open,
onClose,
defaultModelType,
}: ClassificationModelWizardDialogProps) {
const { t } = useTranslation(["views/classificationModel"]);
@ -135,7 +137,12 @@ export default function ClassificationModelWizardDialog({
<DialogContent
className={cn(
"",
isDesktop && "max-h-[75dvh] max-w-6xl overflow-y-auto",
isDesktop &&
wizardState.currentStep == 0 &&
"max-h-[90%] overflow-y-auto xl:max-h-[80%]",
isDesktop &&
wizardState.currentStep > 0 &&
"max-h-[90%] max-w-[70%] overflow-y-auto xl:max-h-[80%]",
)}
onInteractOutside={(e) => {
e.preventDefault();
@ -166,6 +173,7 @@ export default function ClassificationModelWizardDialog({
{wizardState.currentStep === 0 && (
<Step1NameAndDefine
initialData={wizardState.step1Data}
defaultModelType={defaultModelType}
onNext={handleStep1Next}
onCancel={handleCancel}
/>

View File

@ -22,11 +22,16 @@ import { zodResolver } from "@hookform/resolvers/zod";
import { z } from "zod";
import { useTranslation } from "react-i18next";
import { useMemo } from "react";
import { LuX } from "react-icons/lu";
import { MdAddBox } from "react-icons/md";
import { LuX, LuPlus, LuInfo, LuExternalLink } from "react-icons/lu";
import useSWR from "swr";
import { FrigateConfig } from "@/types/frigateConfig";
import { getTranslatedLabel } from "@/utils/i18n";
import { useDocDomain } from "@/hooks/use-doc-domain";
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
export type ModelType = "state" | "object";
export type ObjectClassificationType = "sub_label" | "attribute";
@ -41,17 +46,20 @@ export type Step1FormData = {
type Step1NameAndDefineProps = {
initialData?: Partial<Step1FormData>;
defaultModelType?: "state" | "object";
onNext: (data: Step1FormData) => void;
onCancel: () => void;
};
export default function Step1NameAndDefine({
initialData,
defaultModelType,
onNext,
onCancel,
}: Step1NameAndDefineProps) {
const { t } = useTranslation(["views/classificationModel"]);
const { data: config } = useSWR<FrigateConfig>("config");
const { getLocaleDocUrl } = useDocDomain();
const objectLabels = useMemo(() => {
if (!config) return [];
@ -147,7 +155,7 @@ export default function Step1NameAndDefine({
resolver: zodResolver(step1FormData),
defaultValues: {
modelName: initialData?.modelName || "",
modelType: initialData?.modelType || "state",
modelType: initialData?.modelType || defaultModelType || "state",
objectLabel: initialData?.objectLabel,
objectType: initialData?.objectType || "sub_label",
classes: initialData?.classes?.length ? initialData.classes : [""],
@ -194,7 +202,9 @@ export default function Step1NameAndDefine({
name="modelName"
render={({ field }) => (
<FormItem>
<FormLabel>{t("wizard.step1.name")}</FormLabel>
<FormLabel className="text-primary-variant">
{t("wizard.step1.name")}
</FormLabel>
<FormControl>
<Input
className="h-8"
@ -212,7 +222,9 @@ export default function Step1NameAndDefine({
name="modelType"
render={({ field }) => (
<FormItem>
<FormLabel>{t("wizard.step1.type")}</FormLabel>
<FormLabel className="text-primary-variant">
{t("wizard.step1.type")}
</FormLabel>
<FormControl>
<RadioGroup
onValueChange={field.onChange}
@ -261,7 +273,9 @@ export default function Step1NameAndDefine({
name="objectLabel"
render={({ field }) => (
<FormItem>
<FormLabel>{t("wizard.step1.objectLabel")}</FormLabel>
<FormLabel className="text-primary-variant">
{t("wizard.step1.objectLabel")}
</FormLabel>
<Select
onValueChange={field.onChange}
defaultValue={field.value}
@ -297,9 +311,42 @@ export default function Step1NameAndDefine({
name="objectType"
render={({ field }) => (
<FormItem>
<FormLabel>
{t("wizard.step1.classificationType")}
</FormLabel>
<div className="flex items-center gap-1">
<FormLabel className="text-primary-variant">
{t("wizard.step1.classificationType")}
</FormLabel>
<Popover>
<PopoverTrigger asChild>
<Button
variant="ghost"
size="sm"
className="h-4 w-4 p-0"
>
<LuInfo className="size-3" />
</Button>
</PopoverTrigger>
<PopoverContent className="pointer-events-auto w-80 text-xs">
<div className="flex flex-col gap-2">
<div className="text-sm">
{t("wizard.step1.classificationTypeDesc")}
</div>
<div className="mt-3 flex items-center text-primary">
<a
href={getLocaleDocUrl(
"configuration/custom_classification/object_classification#classification-type",
)}
target="_blank"
rel="noopener noreferrer"
className="inline cursor-pointer"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</a>
</div>
</div>
</PopoverContent>
</Popover>
</div>
<FormControl>
<RadioGroup
onValueChange={field.onChange}
@ -345,11 +392,50 @@ export default function Step1NameAndDefine({
<div className="space-y-2">
<div className="flex items-center justify-between">
<FormLabel>{t("wizard.step1.classes")}</FormLabel>
<MdAddBox
className="size-7 cursor-pointer text-primary hover:text-primary/80"
<div className="flex items-center gap-1">
<FormLabel className="text-primary-variant">
{t("wizard.step1.classes")}
</FormLabel>
<Popover>
<PopoverTrigger asChild>
<Button variant="ghost" size="sm" className="h-4 w-4 p-0">
<LuInfo className="size-3" />
</Button>
</PopoverTrigger>
<PopoverContent className="pointer-events-auto w-80 text-xs">
<div className="flex flex-col gap-2">
<div className="text-sm">
{watchedModelType === "state"
? t("wizard.step1.classesStateDesc")
: t("wizard.step1.classesObjectDesc")}
</div>
<div className="mt-3 flex items-center text-primary">
<a
href={getLocaleDocUrl(
watchedModelType === "state"
? "configuration/custom_classification/state_classification"
: "configuration/custom_classification/object_classification",
)}
target="_blank"
rel="noopener noreferrer"
className="inline cursor-pointer"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</a>
</div>
</div>
</PopoverContent>
</Popover>
</div>
<Button
type="button"
variant="secondary"
className="size-6 rounded-md bg-secondary-foreground p-1 text-background"
onClick={handleAddClass}
/>
>
<LuPlus />
</Button>
</div>
<div className="space-y-2">
{watchedClasses.map((_, index) => (

View File

@ -8,8 +8,7 @@ import {
PopoverContent,
PopoverTrigger,
} from "@/components/ui/popover";
import { MdAddBox } from "react-icons/md";
import { LuX } from "react-icons/lu";
import { LuX, LuPlus } from "react-icons/lu";
import { Stage, Layer, Rect, Transformer } from "react-konva";
import Konva from "konva";
import { useResizeObserver } from "@/hooks/resize-observer";
@ -247,12 +246,11 @@ export default function Step2StateArea({
<PopoverTrigger asChild>
<Button
type="button"
variant="ghost"
size="icon"
className="size-6 p-0"
variant="secondary"
className="size-6 rounded-md bg-secondary-foreground p-1 text-background"
aria-label="Add camera"
>
<MdAddBox className="size-6 text-primary" />
<LuPlus />
</Button>
</PopoverTrigger>
<PopoverContent
@ -262,7 +260,7 @@ export default function Step2StateArea({
onOpenAutoFocus={(e) => e.preventDefault()}
>
<div className="flex flex-col gap-2">
<Heading as="h4" className="text-sm font-medium">
<Heading as="h4" className="text-sm text-primary-variant">
{t("wizard.step2.selectCamera")}
</Heading>
<div className="scrollbar-container flex max-h-[30vh] flex-col gap-1 overflow-y-auto">
@ -285,7 +283,13 @@ export default function Step2StateArea({
</PopoverContent>
</Popover>
) : (
<MdAddBox className="size-6 cursor-not-allowed text-muted" />
<Button
variant="secondary"
className="size-6 cursor-not-allowed rounded-md bg-muted p-1 text-muted-foreground"
disabled
>
<LuPlus />
</Button>
)}
</div>

View File

@ -1,11 +1,4 @@
import { Button } from "@/components/ui/button";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { useTranslation } from "react-i18next";
import { useState, useEffect, useCallback, useMemo } from "react";
import ActivityIndicator from "@/components/indicators/activity-indicator";
@ -47,6 +40,9 @@ export default function Step3ChooseExamples({
[imageName: string]: string;
}>(initialData?.imageClassifications || {});
const [isTraining, setIsTraining] = useState(false);
const [isProcessing, setIsProcessing] = useState(false);
const [currentClassIndex, setCurrentClassIndex] = useState(0);
const [selectedImages, setSelectedImages] = useState<Set<string>>(new Set());
const { data: trainImages, mutate: refreshTrainImages } = useSWR<string[]>(
hasGenerated ? `classification/${step1Data.modelName}/train` : null,
@ -57,16 +53,165 @@ export default function Step3ChooseExamples({
return trainImages;
}, [trainImages]);
const handleClassificationChange = useCallback(
(imageName: string, className: string) => {
setImageClassifications((prev) => ({
...prev,
[imageName]: className,
}));
const toggleImageSelection = useCallback((imageName: string) => {
setSelectedImages((prev) => {
const newSet = new Set(prev);
if (newSet.has(imageName)) {
newSet.delete(imageName);
} else {
newSet.add(imageName);
}
return newSet;
});
}, []);
// Get all classes (excluding "none" - it will be auto-assigned)
const allClasses = useMemo(() => {
return [...step1Data.classes];
}, [step1Data.classes]);
const currentClass = allClasses[currentClassIndex];
const processClassificationsAndTrain = useCallback(
async (classifications: { [imageName: string]: string }) => {
// Step 1: Create config for the new model
const modelConfig: {
enabled: boolean;
name: string;
threshold: number;
state_config?: {
cameras: Record<string, { crop: number[] }>;
motion: boolean;
};
object_config?: { objects: string[]; classification_type: string };
} = {
enabled: true,
name: step1Data.modelName,
threshold: 0.8,
};
if (step1Data.modelType === "state") {
// State model config
const cameras: Record<string, { crop: number[] }> = {};
step2Data?.cameraAreas.forEach((area) => {
cameras[area.camera] = {
crop: area.crop,
};
});
modelConfig.state_config = {
cameras,
motion: true,
};
} else {
// Object model config
modelConfig.object_config = {
objects: step1Data.objectLabel ? [step1Data.objectLabel] : [],
classification_type: step1Data.objectType || "sub_label",
} as { objects: string[]; classification_type: string };
}
// Update config via config API
await axios.put("/config/set", {
requires_restart: 0,
update_topic: `config/classification/custom/${step1Data.modelName}`,
config_data: {
classification: {
custom: {
[step1Data.modelName]: modelConfig,
},
},
},
});
// Step 2: Classify each image by moving it to the correct category folder
const categorizePromises = Object.entries(classifications).map(
([imageName, className]) => {
if (!className) return Promise.resolve();
return axios.post(
`/classification/${step1Data.modelName}/dataset/categorize`,
{
training_file: imageName,
category: className === "none" ? "none" : className,
},
);
},
);
await Promise.all(categorizePromises);
// Step 3: Kick off training
await axios.post(`/classification/${step1Data.modelName}/train`);
toast.success(t("wizard.step3.trainingStarted"));
setIsTraining(true);
},
[],
[step1Data, step2Data, t],
);
const handleContinueClassification = useCallback(async () => {
// Mark selected images with current class
const newClassifications = { ...imageClassifications };
selectedImages.forEach((imageName) => {
newClassifications[imageName] = currentClass;
});
// Check if we're on the last class to select
const isLastClass = currentClassIndex === allClasses.length - 1;
if (isLastClass) {
// Assign remaining unclassified images
unknownImages.slice(0, 24).forEach((imageName) => {
if (!newClassifications[imageName]) {
// For state models with 2 classes, assign to the last class
// For object models, assign to "none"
if (step1Data.modelType === "state" && allClasses.length === 2) {
newClassifications[imageName] = allClasses[allClasses.length - 1];
} else {
newClassifications[imageName] = "none";
}
}
});
// All done, trigger training immediately
setImageClassifications(newClassifications);
setIsProcessing(true);
try {
await processClassificationsAndTrain(newClassifications);
} catch (error) {
const axiosError = error as {
response?: { data?: { message?: string; detail?: string } };
message?: string;
};
const errorMessage =
axiosError.response?.data?.message ||
axiosError.response?.data?.detail ||
axiosError.message ||
"Failed to classify images";
toast.error(
t("wizard.step3.errors.classifyFailed", { error: errorMessage }),
);
setIsProcessing(false);
}
} else {
// Move to next class
setImageClassifications(newClassifications);
setCurrentClassIndex((prev) => prev + 1);
setSelectedImages(new Set());
}
}, [
selectedImages,
currentClass,
currentClassIndex,
allClasses,
imageClassifications,
unknownImages,
step1Data,
processClassificationsAndTrain,
t,
]);
const generateExamples = useCallback(async () => {
setIsGenerating(true);
@ -135,77 +280,9 @@ export default function Step3ChooseExamples({
}, []);
const handleContinue = useCallback(async () => {
setIsProcessing(true);
try {
// Step 1: Create config for the new model
const modelConfig: {
enabled: boolean;
name: string;
threshold: number;
state_config?: {
cameras: Record<string, { crop: number[] }>;
motion: boolean;
};
object_config?: { objects: string[]; classification_type: string };
} = {
enabled: true,
name: step1Data.modelName,
threshold: 0.8,
};
if (step1Data.modelType === "state") {
// State model config
const cameras: Record<string, { crop: number[] }> = {};
step2Data?.cameraAreas.forEach((area) => {
cameras[area.camera] = {
crop: area.crop,
};
});
modelConfig.state_config = {
cameras,
motion: true,
};
} else {
// Object model config
modelConfig.object_config = {
objects: step1Data.objectLabel ? [step1Data.objectLabel] : [],
classification_type: step1Data.objectType || "sub_label",
} as { objects: string[]; classification_type: string };
}
// Update config via config API
await axios.put("/config/set", {
requires_restart: 0,
update_topic: `config/classification/custom/${step1Data.modelName}`,
config_data: {
classification: {
custom: {
[step1Data.modelName]: modelConfig,
},
},
},
});
// Step 2: Classify each image by moving it to the correct category folder
const categorizePromises = Object.entries(imageClassifications).map(
([imageName, className]) => {
if (!className) return Promise.resolve();
return axios.post(
`/classification/${step1Data.modelName}/dataset/categorize`,
{
training_file: imageName,
category: className === "none" ? "none" : className,
},
);
},
);
await Promise.all(categorizePromises);
// Step 3: Kick off training
await axios.post(`/classification/${step1Data.modelName}/train`);
toast.success(t("wizard.step3.trainingStarted"));
setIsTraining(true);
await processClassificationsAndTrain(imageClassifications);
} catch (error) {
const axiosError = error as {
response?: { data?: { message?: string; detail?: string } };
@ -220,14 +297,25 @@ export default function Step3ChooseExamples({
toast.error(
t("wizard.step3.errors.classifyFailed", { error: errorMessage }),
);
setIsProcessing(false);
}
}, [imageClassifications, step1Data, step2Data, t]);
}, [imageClassifications, processClassificationsAndTrain, t]);
const unclassifiedImages = useMemo(() => {
if (!unknownImages) return [];
const images = unknownImages.slice(0, 24);
// Only filter if we have any classifications
if (Object.keys(imageClassifications).length === 0) {
return images;
}
return images.filter((img) => !imageClassifications[img]);
}, [unknownImages, imageClassifications]);
const allImagesClassified = useMemo(() => {
if (!unknownImages || unknownImages.length === 0) return false;
const imagesToClassify = unknownImages.slice(0, 24);
return imagesToClassify.every((img) => imageClassifications[img]);
}, [unknownImages, imageClassifications]);
return unclassifiedImages.length === 0;
}, [unclassifiedImages]);
return (
<div className="flex flex-col gap-6">
@ -260,9 +348,18 @@ export default function Step3ChooseExamples({
</div>
) : hasGenerated ? (
<div className="flex flex-col gap-4">
<div className="text-sm text-muted-foreground">
{t("wizard.step3.description")}
</div>
{!allImagesClassified && (
<div className="text-center">
<h3 className="text-lg font-medium">
{t("wizard.step3.selectImagesPrompt", {
className: currentClass,
})}
</h3>
<p className="text-sm text-muted-foreground">
{t("wizard.step3.selectImagesDescription")}
</p>
</div>
)}
<div
className={cn(
"rounded-lg bg-secondary/30 p-4",
@ -270,58 +367,42 @@ export default function Step3ChooseExamples({
)}
>
{!unknownImages || unknownImages.length === 0 ? (
<div className="flex h-[40vh] items-center justify-center">
<div className="flex h-[40vh] flex-col items-center justify-center gap-4">
<p className="text-muted-foreground">
{t("wizard.step3.noImages")}
</p>
<Button onClick={generateExamples} variant="select">
{t("wizard.step3.retryGenerate")}
</Button>
</div>
) : allImagesClassified && isProcessing ? (
<div className="flex h-[40vh] flex-col items-center justify-center gap-4">
<ActivityIndicator className="size-12" />
<p className="text-lg font-medium">
{t("wizard.step3.classifying")}
</p>
</div>
) : (
<div className="grid grid-cols-2 gap-3 sm:grid-cols-6">
{unknownImages.slice(0, 24).map((imageName, index) => (
<div
key={imageName}
className="group relative aspect-square overflow-hidden rounded-lg border bg-background"
>
<img
src={`${baseUrl}clips/${step1Data.modelName}/train/${imageName}`}
alt={`Example ${index + 1}`}
className="h-full w-full object-cover"
/>
<div className="absolute bottom-0 left-0 right-0 p-2">
<Select
value={imageClassifications[imageName] || ""}
onValueChange={(value) =>
handleClassificationChange(imageName, value)
}
>
<SelectTrigger className="h-7 bg-background/20 text-xs">
<SelectValue
placeholder={t("wizard.step3.selectClass")}
/>
</SelectTrigger>
<SelectContent>
{step1Data.modelType === "object" && (
<SelectItem
value="none"
className="cursor-pointer text-xs"
>
{t("wizard.step3.none")}
</SelectItem>
)}
{step1Data.classes.map((className) => (
<SelectItem
key={className}
value={className}
className="cursor-pointer text-xs"
>
{className}
</SelectItem>
))}
</SelectContent>
</Select>
<div className="grid grid-cols-2 gap-4 sm:grid-cols-6">
{unclassifiedImages.map((imageName, index) => {
const isSelected = selectedImages.has(imageName);
return (
<div
key={imageName}
className={cn(
"aspect-square cursor-pointer overflow-hidden rounded-lg border-2 bg-background transition-all",
isSelected && "border-selected ring-2 ring-selected",
)}
onClick={() => toggleImageSelection(imageName)}
>
<img
src={`${baseUrl}clips/${step1Data.modelName}/train/${imageName}`}
alt={`Example ${index + 1}`}
className="h-full w-full object-cover"
/>
</div>
</div>
))}
);
})}
</div>
)}
</div>
@ -344,11 +425,16 @@ export default function Step3ChooseExamples({
</Button>
<Button
type="button"
onClick={handleContinue}
onClick={
allImagesClassified
? handleContinue
: handleContinueClassification
}
variant="select"
className="flex items-center justify-center gap-2 sm:flex-1"
disabled={!hasGenerated || isGenerating || !allImagesClassified}
disabled={!hasGenerated || isGenerating || isProcessing}
>
{isProcessing && <ActivityIndicator className="size-4" />}
{t("button.continue", { ns: "common" })}
</Button>
</div>

View File

@ -10,13 +10,14 @@ import {
CustomClassificationModelConfig,
FrigateConfig,
} from "@/types/frigateConfig";
import { useMemo, useState } from "react";
import { useEffect, useMemo, useState } from "react";
import { isMobile } from "react-device-detect";
import { useTranslation } from "react-i18next";
import { FaFolderPlus } from "react-icons/fa";
import { MdModelTraining } from "react-icons/md";
import useSWR from "swr";
import Heading from "@/components/ui/heading";
import { useOverlayState } from "@/hooks/use-overlay-state";
const allModelTypes = ["objects", "states"] as const;
type ModelType = (typeof allModelTypes)[number];
@ -28,8 +29,12 @@ export default function ModelSelectionView({
onClick,
}: ModelSelectionViewProps) {
const { t } = useTranslation(["views/classificationModel"]);
const [page, setPage] = useState<ModelType>("objects");
const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100);
const [page, setPage] = useOverlayState<ModelType>("objects", "objects");
const [pageToggle, setPageToggle] = useOptimisticState(
page || "objects",
setPage,
100,
);
const { data: config, mutate: refreshConfig } = useSWR<FrigateConfig>(
"config",
{
@ -37,6 +42,12 @@ export default function ModelSelectionView({
},
);
// title
useEffect(() => {
document.title = t("documentTitle");
}, [t]);
// data
const classificationConfigs = useMemo(() => {
@ -69,25 +80,11 @@ export default function ModelSelectionView({
return <ActivityIndicator />;
}
if (classificationConfigs.length == 0) {
return (
<>
<ClassificationModelWizardDialog
open={newModel}
onClose={() => {
setNewModel(false);
refreshConfig();
}}
/>
<NoModelsView onCreateModel={() => setNewModel(true)} />;
</>
);
}
return (
<div className="flex size-full flex-col p-2">
<ClassificationModelWizardDialog
open={newModel}
defaultModelType={pageToggle === "objects" ? "object" : "state"}
onClose={() => {
setNewModel(false);
refreshConfig();
@ -103,7 +100,6 @@ export default function ModelSelectionView({
value={pageToggle}
onValueChange={(value: ModelType) => {
if (value) {
// Restrict viewer navigation
setPageToggle(value);
}
}}
@ -136,31 +132,45 @@ export default function ModelSelectionView({
</div>
</div>
<div className="flex size-full gap-2 p-2">
{selectedClassificationConfigs.map((config) => (
<ModelCard
key={config.name}
config={config}
onClick={() => onClick(config)}
{selectedClassificationConfigs.length === 0 ? (
<NoModelsView
onCreateModel={() => setNewModel(true)}
modelType={pageToggle}
/>
))}
) : (
selectedClassificationConfigs.map((config) => (
<ModelCard
key={config.name}
config={config}
onClick={() => onClick(config)}
/>
))
)}
</div>
</div>
);
}
function NoModelsView({ onCreateModel }: { onCreateModel: () => void }) {
function NoModelsView({
onCreateModel,
modelType,
}: {
onCreateModel: () => void;
modelType: ModelType;
}) {
const { t } = useTranslation(["views/classificationModel"]);
const typeKey = modelType === "objects" ? "object" : "state";
return (
<div className="flex size-full items-center justify-center">
<div className="flex flex-col items-center gap-2">
<MdModelTraining className="size-8" />
<Heading as="h4">{t("noModels.title")}</Heading>
<Heading as="h4">{t(`noModels.${typeKey}.title`)}</Heading>
<div className="mb-3 text-center text-secondary-foreground">
{t("noModels.description")}
{t(`noModels.${typeKey}.description`)}
</div>
<Button size="sm" variant="select" onClick={onCreateModel}>
{t("noModels.buttonText")}
{t(`noModels.${typeKey}.buttonText`)}
</Button>
</div>
</div>

View File

@ -642,6 +642,7 @@ function DatasetGrid({
filepath: `clips/${modelName}/dataset/${categoryName}/${image}`,
name: "",
}}
showArea={false}
selected={selectedImages.includes(image)}
i18nLibrary="views/classificationModel"
onClick={(data, _) => onClickImages([data.filename], true)}