Generic classification card (#20379)

* Refactor face card into generic classification card

* Update classification data card to use classification card

* Refactor state training grid to use classification card

* Refactor grouped face card into generic component

* Combine classification objects by event

* Fixup

* Cleanup

* Cleanup

* Do not fail if a single event is not found

* Save original frame

* Cleanup

* Undo
This commit is contained in:
Nicolas Mowen 2025-10-07 13:43:06 -06:00 committed by GitHub
parent 4bea69591b
commit 37afd5da6b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 705 additions and 452 deletions

View File

@ -434,10 +434,8 @@ async def event_ids(ids: str, request: Request):
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
await require_camera_access(event.camera, request=request) await require_camera_access(event.camera, request=request)
except DoesNotExist: except DoesNotExist:
return JSONResponse( # we should not fail the entire request if an event is not found
content=({"success": False, "message": f"Event {event_id} not found"}), continue
status_code=404,
)
try: try:
events = Event.select().where(Event.id << ids).dicts().iterator() events = Event.select().where(Event.id << ids).dicts().iterator()

View File

@ -142,7 +142,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
if frame.shape != (224, 224): if frame.shape != (224, 224):
try: try:
frame = cv2.resize(frame, (224, 224)) resized_frame = cv2.resize(frame, (224, 224))
except Exception: except Exception:
logger.warning("Failed to resize image for state classification") logger.warning("Failed to resize image for state classification")
return return
@ -151,13 +151,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
"none-none",
now, now,
"unknown", "unknown",
0.0, 0.0,
) )
return return
input = np.expand_dims(frame, axis=0) input = np.expand_dims(resized_frame, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke() self.interpreter.invoke()
res: np.ndarray = self.interpreter.get_tensor( res: np.ndarray = self.interpreter.get_tensor(
@ -171,6 +172,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR), cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
"none-none",
now, now,
self.labelmap[best_id], self.labelmap[best_id],
score, score,
@ -284,7 +286,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
if crop.shape != (224, 224): if crop.shape != (224, 224):
try: try:
crop = cv2.resize(crop, (224, 224)) resized_crop = cv2.resize(crop, (224, 224))
except Exception: except Exception:
logger.warning("Failed to resize image for state classification") logger.warning("Failed to resize image for state classification")
return return
@ -293,13 +295,14 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
obj_data["id"],
now, now,
"unknown", "unknown",
0.0, 0.0,
) )
return return
input = np.expand_dims(crop, axis=0) input = np.expand_dims(resized_crop, axis=0)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input) self.interpreter.set_tensor(self.tensor_input_details[0]["index"], input)
self.interpreter.invoke() self.interpreter.invoke()
res: np.ndarray = self.interpreter.get_tensor( res: np.ndarray = self.interpreter.get_tensor(
@ -314,6 +317,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
write_classification_attempt( write_classification_attempt(
self.train_dir, self.train_dir,
cv2.cvtColor(crop, cv2.COLOR_RGB2BGR), cv2.cvtColor(crop, cv2.COLOR_RGB2BGR),
obj_data["id"],
now, now,
self.labelmap[best_id], self.labelmap[best_id],
score, score,
@ -372,6 +376,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
def write_classification_attempt( def write_classification_attempt(
folder: str, folder: str,
frame: np.ndarray, frame: np.ndarray,
event_id: str,
timestamp: float, timestamp: float,
label: str, label: str,
score: float, score: float,
@ -379,7 +384,7 @@ def write_classification_attempt(
if "-" in label: if "-" in label:
label = label.replace("-", "_") label = label.replace("-", "_")
file = os.path.join(folder, f"{timestamp}-{label}-{score}.webp") file = os.path.join(folder, f"{event_id}-{timestamp}-{label}-{score}.webp")
os.makedirs(folder, exist_ok=True) os.makedirs(folder, exist_ok=True)
cv2.imwrite(file, frame) cv2.imwrite(file, frame)

View File

@ -263,5 +263,8 @@
"desc": "Page not found" "desc": "Page not found"
}, },
"selectItem": "Select {{item}}", "selectItem": "Select {{item}}",
"readTheDocumentation": "Read the documentation" "readTheDocumentation": "Read the documentation",
"information": {
"pixels": "{{area}}px"
}
} }

View File

@ -5,7 +5,6 @@
"invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens." "invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
}, },
"details": { "details": {
"person": "Person",
"subLabelScore": "Sub Label Score", "subLabelScore": "Sub Label Score",
"scoreInfo": "The sub label score is the weighted score for all of the recognized face confidences, so this may differ from the score shown on the snapshot.", "scoreInfo": "The sub label score is the weighted score for all of the recognized face confidences, so this may differ from the score shown on the snapshot.",
"face": "Face Details", "face": "Face Details",

View File

@ -0,0 +1,263 @@
import { baseUrl } from "@/api/baseUrl";
import useContextMenu from "@/hooks/use-contextmenu";
import { cn } from "@/lib/utils";
import {
ClassificationItemData,
ClassificationThreshold,
} from "@/types/classification";
import { Event } from "@/types/event";
import { useMemo, useRef, useState } from "react";
import { isDesktop, isMobile } from "react-device-detect";
import { useTranslation } from "react-i18next";
import TimeAgo from "../dynamic/TimeAgo";
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
import { LuSearch } from "react-icons/lu";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import { useNavigate } from "react-router-dom";
import { getTranslatedLabel } from "@/utils/i18n";
// Props for a single classification attempt card.
type ClassificationCardProps = {
  className?: string;
  // extra classes for the thumbnail image itself
  imgClassName?: string;
  // item to render: image path, display name, optional score/timestamp
  data: ClassificationItemData;
  // score thresholds used to color the confidence value; omit to skip coloring
  threshold?: ClassificationThreshold;
  selected: boolean;
  // i18n namespace used for label lookups (e.g. "views/faceLibrary")
  i18nLibrary: string;
  // when true (default), overlay the image's natural pixel area
  showArea?: boolean;
  // invoked on click / context menu; `meta` is true for ctrl/cmd/long-press
  onClick: (data: ClassificationItemData, meta: boolean) => void;
  // action icons rendered in the card footer
  children?: React.ReactNode;
};
/**
 * Card showing a single classification attempt: the cropped image, the
 * recognized name, and (when available) a confidence score colored by the
 * supplied thresholds — >= recognition is a match (green), >= unknown is a
 * potential match (orange), otherwise unknown (red).
 *
 * Right-click (context menu) is treated as a meta-click for multi-select.
 */
export function ClassificationCard({
  className,
  imgClassName,
  data,
  threshold,
  selected,
  i18nLibrary,
  showArea = true,
  onClick,
  children,
}: ClassificationCardProps) {
  const { t } = useTranslation([i18nLibrary]);
  const [imageLoaded, setImageLoaded] = useState(false);

  const scoreStatus = useMemo(() => {
    if (!data.score || !threshold) {
      return "unknown";
    }

    if (data.score >= threshold.recognition) {
      return "match";
    } else if (data.score >= threshold.unknown) {
      return "potential";
    } else {
      return "unknown";
    }
  }, [data, threshold]);

  // interaction

  const imgRef = useRef<HTMLImageElement | null>(null);
  useContextMenu(imgRef, () => {
    onClick(data, true);
  });

  // natural pixel area of the source image; only computable after load
  const imageArea = useMemo(() => {
    if (!showArea || imgRef.current == null || !imageLoaded) {
      return undefined;
    }

    return imgRef.current.naturalWidth * imgRef.current.naturalHeight;
  }, [showArea, imageLoaded]);

  return (
    <>
      <div
        className={cn(
          "relative flex cursor-pointer flex-col rounded-lg outline outline-[3px]",
          className,
          selected
            ? "shadow-selected outline-selected"
            : "outline-transparent duration-500",
        )}
      >
        <div className="relative w-full select-none overflow-hidden rounded-lg">
          <img
            ref={imgRef}
            onLoad={() => setImageLoaded(true)}
            className={cn("size-44", imgClassName, isMobile && "w-full")}
            src={`${baseUrl}${data.filepath}`}
            alt={data.name}
            onClick={(e) => {
              e.stopPropagation();
              onClick(data, e.metaKey || e.ctrlKey);
            }}
          />
          {imageArea != undefined && (
            <div className="absolute bottom-1 right-1 z-10 rounded-lg bg-black/50 px-2 py-1 text-xs text-white">
              {t("information.pixels", { ns: "common", area: imageArea })}
            </div>
          )}
        </div>
        <div className="select-none p-2">
          <div className="flex w-full flex-row items-center justify-between gap-2">
            <div className="flex flex-col items-start text-xs text-primary-variant">
              <div className="smart-capitalize">
                {data.name == "unknown" ? t("details.unknown") : data.name}
              </div>
              {/* coerce to boolean: a bare `data.score && …` would render a
                  literal "0" in the DOM when the score is exactly 0 */}
              {!!data.score && (
                <div
                  className={cn(
                    "",
                    scoreStatus == "match" && "text-success",
                    scoreStatus == "potential" && "text-orange-400",
                    scoreStatus == "unknown" && "text-danger",
                  )}
                >
                  {Math.round(data.score * 100)}%
                </div>
              )}
            </div>
            <div className="flex flex-row items-start justify-end gap-5 md:gap-4">
              {children}
            </div>
          </div>
        </div>
      </div>
    </>
  );
}
// Props for a card grouping several classification attempts from one event.
type GroupedClassificationCardProps = {
  // attempts belonging to the same tracked-object event
  group: ClassificationItemData[];
  // the tracked-object event, when it could be looked up
  event?: Event;
  threshold?: ClassificationThreshold;
  // filenames of currently-selected items
  selectedItems: string[];
  i18nLibrary: string;
  // object label used for the card header (e.g. "person")
  objectType: string;
  // called with an item for per-item (multi-)selection, or undefined to
  // toggle selection of the whole group
  onClick: (data: ClassificationItemData | undefined) => void;
  onSelectEvent: (event: Event) => void;
  // render-prop producing the per-item action icons
  children?: (data: ClassificationItemData) => React.ReactNode;
};
/**
 * Card grouping all classification attempts for a single event: a header with
 * the object label, sub label / score, relative time, and an explore shortcut,
 * followed by one ClassificationCard per attempt.
 *
 * Clicking the card while a selection is active (or right-clicking it) toggles
 * selection of the whole group via onClick(undefined).
 */
export function GroupedClassificationCard({
  group,
  event,
  threshold,
  selectedItems,
  i18nLibrary,
  objectType,
  onClick,
  onSelectEvent,
  children,
}: GroupedClassificationCardProps) {
  const navigate = useNavigate();
  const { t } = useTranslation(["views/explore", i18nLibrary]);

  // data

  // true when every item in the group is selected; the group then renders
  // with the selected outline and individual cards skip their own highlight
  const allItemsSelected = useMemo(
    () => group.every((data) => selectedItems.includes(data.filename)),
    [group, selectedItems],
  );

  // timestamp (ms) of the first item, used for the TimeAgo display;
  // undefined when the group has no items or the item has no timestamp
  const time = useMemo(() => {
    const item = group[0];

    if (!item?.timestamp) {
      return undefined;
    }

    return item.timestamp * 1000;
  }, [group]);

  return (
    <div
      className={cn(
        "flex cursor-pointer flex-col gap-2 rounded-lg bg-card p-2 outline outline-[3px]",
        isMobile && "w-full",
        allItemsSelected
          ? "shadow-selected outline-selected"
          : "outline-transparent duration-500",
      )}
      onClick={() => {
        // only group-toggle when a selection is already in progress
        if (selectedItems.length) {
          onClick(undefined);
        }
      }}
      onContextMenu={(e) => {
        // right-click always toggles group selection
        e.stopPropagation();
        e.preventDefault();
        onClick(undefined);
      }}
    >
      <div className="flex flex-row justify-between">
        <div className="flex flex-col gap-1">
          <div className="select-none smart-capitalize">
            {getTranslatedLabel(objectType)}
            {event?.sub_label
              ? `: ${event.sub_label} (${Math.round((event.data.sub_label_score || 0) * 100)}%)`
              : ": " + t("details.unknown")}
          </div>
          {time && (
            <TimeAgo
              className="text-sm text-secondary-foreground"
              time={time}
              dense
            />
          )}
        </div>
        {/* shortcut to the event in the explore view, when the event exists */}
        {event && (
          <Tooltip>
            <TooltipTrigger>
              <div
                className="cursor-pointer"
                onClick={() => {
                  navigate(`/explore?event_id=${event.id}`);
                }}
              >
                <LuSearch className="size-4 text-muted-foreground" />
              </div>
            </TooltipTrigger>
            <TooltipPortal>
              <TooltipContent>
                {t("details.item.button.viewInExplore", {
                  ns: "views/explore",
                })}
              </TooltipContent>
            </TooltipPortal>
          </Tooltip>
        )}
      </div>
      <div
        className={cn(
          "gap-2",
          isDesktop
            ? "flex flex-row flex-wrap"
            : "grid grid-cols-2 sm:grid-cols-5 lg:grid-cols-6",
        )}
      >
        {group.map((data: ClassificationItemData) => (
          <ClassificationCard
            key={data.filename}
            data={data}
            threshold={threshold}
            selected={
              allItemsSelected ? false : selectedItems.includes(data.filename)
            }
            i18nLibrary={i18nLibrary}
            onClick={(data, meta) => {
              // meta-click or an active selection selects the single item;
              // otherwise open the event detail
              if (meta || selectedItems.length > 0) {
                onClick(data);
              } else if (event) {
                onSelectEvent(event);
              }
            }}
          >
            {children?.(data)}
          </ClassificationCard>
        ))}
      </div>
    </div>
  );
}

View File

@ -1,5 +1,3 @@
import { baseUrl } from "@/api/baseUrl";
import TimeAgo from "@/components/dynamic/TimeAgo";
import AddFaceIcon from "@/components/icons/AddFaceIcon"; import AddFaceIcon from "@/components/icons/AddFaceIcon";
import ActivityIndicator from "@/components/indicators/activity-indicator"; import ActivityIndicator from "@/components/indicators/activity-indicator";
import CreateFaceWizardDialog from "@/components/overlay/detail/FaceCreateWizardDialog"; import CreateFaceWizardDialog from "@/components/overlay/detail/FaceCreateWizardDialog";
@ -37,13 +35,12 @@ import {
TooltipContent, TooltipContent,
TooltipTrigger, TooltipTrigger,
} from "@/components/ui/tooltip"; } from "@/components/ui/tooltip";
import useContextMenu from "@/hooks/use-contextmenu";
import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useKeyboardListener from "@/hooks/use-keyboard-listener";
import useOptimisticState from "@/hooks/use-optimistic-state"; import useOptimisticState from "@/hooks/use-optimistic-state";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { Event } from "@/types/event"; import { Event } from "@/types/event";
import { FaceLibraryData, RecognizedFaceData } from "@/types/face"; import { FaceLibraryData } from "@/types/face";
import { FaceRecognitionConfig, FrigateConfig } from "@/types/frigateConfig"; import { FrigateConfig } from "@/types/frigateConfig";
import { TooltipPortal } from "@radix-ui/react-tooltip"; import { TooltipPortal } from "@radix-ui/react-tooltip";
import axios from "axios"; import axios from "axios";
import { import {
@ -54,7 +51,7 @@ import {
useRef, useRef,
useState, useState,
} from "react"; } from "react";
import { isDesktop, isMobile } from "react-device-detect"; import { isDesktop } from "react-device-detect";
import { Trans, useTranslation } from "react-i18next"; import { Trans, useTranslation } from "react-i18next";
import { import {
LuFolderCheck, LuFolderCheck,
@ -62,16 +59,19 @@ import {
LuPencil, LuPencil,
LuRefreshCw, LuRefreshCw,
LuScanFace, LuScanFace,
LuSearch,
LuTrash2, LuTrash2,
} from "react-icons/lu"; } from "react-icons/lu";
import { useNavigate } from "react-router-dom";
import { toast } from "sonner"; import { toast } from "sonner";
import useSWR from "swr"; import useSWR from "swr";
import SearchDetailDialog, { import SearchDetailDialog, {
SearchTab, SearchTab,
} from "@/components/overlay/detail/SearchDetailDialog"; } from "@/components/overlay/detail/SearchDetailDialog";
import { SearchResult } from "@/types/search"; import { SearchResult } from "@/types/search";
import {
ClassificationCard,
GroupedClassificationCard,
} from "@/components/card/ClassificationCard";
import { ClassificationItemData } from "@/types/classification";
export default function FaceLibrary() { export default function FaceLibrary() {
const { t } = useTranslation(["views/faceLibrary"]); const { t } = useTranslation(["views/faceLibrary"]);
@ -641,7 +641,7 @@ function TrainingGrid({
// face data // face data
const faceGroups = useMemo(() => { const faceGroups = useMemo(() => {
const groups: { [eventId: string]: RecognizedFaceData[] } = {}; const groups: { [eventId: string]: ClassificationItemData[] } = {};
const faces = attemptImages const faces = attemptImages
.map((image) => { .map((image) => {
@ -650,6 +650,7 @@ function TrainingGrid({
try { try {
return { return {
filename: image, filename: image,
filepath: `clips/faces/train/${image}`,
timestamp: Number.parseFloat(parts[2]), timestamp: Number.parseFloat(parts[2]),
eventId: `${parts[0]}-${parts[1]}`, eventId: `${parts[0]}-${parts[1]}`,
name: parts[3], name: parts[3],
@ -739,7 +740,7 @@ function TrainingGrid({
type FaceAttemptGroupProps = { type FaceAttemptGroupProps = {
config: FrigateConfig; config: FrigateConfig;
group: RecognizedFaceData[]; group: ClassificationItemData[];
event?: Event; event?: Event;
faceNames: string[]; faceNames: string[];
selectedFaces: string[]; selectedFaces: string[];
@ -757,15 +758,16 @@ function FaceAttemptGroup({
onSelectEvent, onSelectEvent,
onRefresh, onRefresh,
}: FaceAttemptGroupProps) { }: FaceAttemptGroupProps) {
const navigate = useNavigate();
const { t } = useTranslation(["views/faceLibrary", "views/explore"]); const { t } = useTranslation(["views/faceLibrary", "views/explore"]);
// data // data
const allFacesSelected = useMemo( const threshold = useMemo(() => {
() => group.every((face) => selectedFaces.includes(face.filename)), return {
[group, selectedFaces], recognition: config.face_recognition.recognition_threshold,
); unknown: config.face_recognition.unknown_score,
};
}, [config]);
// interaction // interaction
@ -799,144 +801,10 @@ function FaceAttemptGroup({
[event, group, selectedFaces, onClickFaces, onSelectEvent], [event, group, selectedFaces, onClickFaces, onSelectEvent],
); );
return (
<div
className={cn(
"flex cursor-pointer flex-col gap-2 rounded-lg bg-card p-2 outline outline-[3px]",
isMobile && "w-full",
allFacesSelected
? "shadow-selected outline-selected"
: "outline-transparent duration-500",
)}
onClick={() => {
if (selectedFaces.length) {
handleClickEvent(true);
}
}}
onContextMenu={(e) => {
e.stopPropagation();
e.preventDefault();
handleClickEvent(true);
}}
>
<div className="flex flex-row justify-between">
<div className="flex flex-col gap-1">
<div className="select-none smart-capitalize">
{t("details.person")}
{event?.sub_label
? `: ${event.sub_label} (${Math.round((event.data.sub_label_score || 0) * 100)}%)`
: ": " + t("details.unknown")}
</div>
<TimeAgo
className="text-sm text-secondary-foreground"
time={group[0].timestamp * 1000}
dense
/>
</div>
{event && (
<Tooltip>
<TooltipTrigger>
<div
className="cursor-pointer"
onClick={() => {
navigate(`/explore?event_id=${event.id}`);
}}
>
<LuSearch className="size-4 text-muted-foreground" />
</div>
</TooltipTrigger>
<TooltipPortal>
<TooltipContent>
{t("details.item.button.viewInExplore", {
ns: "views/explore",
})}
</TooltipContent>
</TooltipPortal>
</Tooltip>
)}
</div>
<div
className={cn(
"gap-2",
isDesktop
? "flex flex-row flex-wrap"
: "grid grid-cols-2 sm:grid-cols-5 lg:grid-cols-6",
)}
>
{group.map((data: RecognizedFaceData) => (
<FaceAttempt
key={data.filename}
data={data}
faceNames={faceNames}
recognitionConfig={config.face_recognition}
selected={
allFacesSelected ? false : selectedFaces.includes(data.filename)
}
onClick={(data, meta) => {
if (meta || selectedFaces.length > 0) {
onClickFaces([data.filename], true);
} else if (event) {
onSelectEvent(event);
}
}}
onRefresh={onRefresh}
/>
))}
</div>
</div>
);
}
type FaceAttemptProps = {
data: RecognizedFaceData;
faceNames: string[];
recognitionConfig: FaceRecognitionConfig;
selected: boolean;
onClick: (data: RecognizedFaceData, meta: boolean) => void;
onRefresh: () => void;
};
function FaceAttempt({
data,
faceNames,
recognitionConfig,
selected,
onClick,
onRefresh,
}: FaceAttemptProps) {
const { t } = useTranslation(["views/faceLibrary"]);
const [imageLoaded, setImageLoaded] = useState(false);
const scoreStatus = useMemo(() => {
if (data.score >= recognitionConfig.recognition_threshold) {
return "match";
} else if (data.score >= recognitionConfig.unknown_score) {
return "potential";
} else {
return "unknown";
}
}, [data, recognitionConfig]);
// interaction
const imgRef = useRef<HTMLImageElement | null>(null);
useContextMenu(imgRef, () => {
onClick(data, true);
});
const imageArea = useMemo(() => {
if (imgRef.current == null || !imageLoaded) {
return undefined;
}
return imgRef.current.naturalWidth * imgRef.current.naturalHeight;
}, [imageLoaded]);
// api calls // api calls
const onTrainAttempt = useCallback( const onTrainAttempt = useCallback(
(trainName: string) => { (data: ClassificationItemData, trainName: string) => {
axios axios
.post(`/faces/train/${trainName}/classify`, { .post(`/faces/train/${trainName}/classify`, {
training_file: data.filename, training_file: data.filename,
@ -959,10 +827,11 @@ function FaceAttempt({
}); });
}); });
}, },
[data, onRefresh, t], [onRefresh, t],
); );
const onReprocess = useCallback(() => { const onReprocess = useCallback(
(data: ClassificationItemData) => {
axios axios
.post(`/faces/reprocess`, { training_file: data.filename }) .post(`/faces/reprocess`, { training_file: data.filename })
.then((resp) => { .then((resp) => {
@ -978,60 +847,39 @@ function FaceAttempt({
error.response?.data?.message || error.response?.data?.message ||
error.response?.data?.detail || error.response?.data?.detail ||
"Unknown error"; "Unknown error";
toast.error(t("toast.error.updateFaceScoreFailed", { errorMessage }), { toast.error(
t("toast.error.updateFaceScoreFailed", { errorMessage }),
{
position: "top-center", position: "top-center",
},
);
}); });
}); },
}, [data, onRefresh, t]); [onRefresh, t],
);
return ( return (
<> <GroupedClassificationCard
<div group={group}
className={cn( event={event}
"relative flex cursor-pointer flex-col rounded-lg outline outline-[3px]", threshold={threshold}
selected selectedItems={selectedFaces}
? "shadow-selected outline-selected" i18nLibrary="views/faceLibrary"
: "outline-transparent duration-500", objectType="person"
)} onClick={(data) => {
> if (data) {
<div className="relative w-full select-none overflow-hidden rounded-lg"> onClickFaces([data.filename], true);
<img } else {
ref={imgRef} handleClickEvent(true);
onLoad={() => setImageLoaded(true)} }
className={cn("size-44", isMobile && "w-full")}
src={`${baseUrl}clips/faces/train/${data.filename}`}
onClick={(e) => {
e.stopPropagation();
onClick(data, e.metaKey || e.ctrlKey);
}} }}
/> onSelectEvent={onSelectEvent}
{imageArea != undefined && (
<div className="absolute bottom-1 right-1 z-10 rounded-lg bg-black/50 px-2 py-1 text-xs text-white">
{t("pixels", { area: imageArea })}
</div>
)}
</div>
<div className="select-none p-2">
<div className="flex w-full flex-row items-center justify-between gap-2">
<div className="flex flex-col items-start text-xs text-primary-variant">
<div className="smart-capitalize">
{data.name == "unknown" ? t("details.unknown") : data.name}
</div>
<div
className={cn(
"",
scoreStatus == "match" && "text-success",
scoreStatus == "potential" && "text-orange-400",
scoreStatus == "unknown" && "text-danger",
)}
> >
{Math.round(data.score * 100)}% {(data) => (
</div> <>
</div>
<div className="flex flex-row items-start justify-end gap-5 md:gap-4">
<FaceSelectionDialog <FaceSelectionDialog
faceNames={faceNames} faceNames={faceNames}
onTrainAttempt={onTrainAttempt} onTrainAttempt={(name) => onTrainAttempt(data, name)}
> >
<AddFaceIcon className="size-5 cursor-pointer text-primary-variant hover:text-primary" /> <AddFaceIcon className="size-5 cursor-pointer text-primary-variant hover:text-primary" />
</FaceSelectionDialog> </FaceSelectionDialog>
@ -1039,16 +887,14 @@ function FaceAttempt({
<TooltipTrigger> <TooltipTrigger>
<LuRefreshCw <LuRefreshCw
className="size-5 cursor-pointer text-primary-variant hover:text-primary" className="size-5 cursor-pointer text-primary-variant hover:text-primary"
onClick={() => onReprocess()} onClick={() => onReprocess(data)}
/> />
</TooltipTrigger> </TooltipTrigger>
<TooltipContent>{t("button.reprocessFace")}</TooltipContent> <TooltipContent>{t("button.reprocessFace")}</TooltipContent>
</Tooltip> </Tooltip>
</div>
</div>
</div>
</div>
</> </>
)}
</GroupedClassificationCard>
); );
} }
@ -1093,80 +939,32 @@ function FaceGrid({
)} )}
> >
{sortedFaces.map((image: string) => ( {sortedFaces.map((image: string) => (
<FaceImage <ClassificationCard
className="gap-2 rounded-lg bg-card p-2"
key={image} key={image}
name={pageToggle} data={{
image={image} name: pageToggle,
selected={selectedFaces.includes(image)} filename: image,
onClickFaces={onClickFaces} filepath: `clips/faces/${pageToggle}/${image}`,
onDelete={onDelete}
/>
))}
</div>
);
}
type FaceImageProps = {
name: string;
image: string;
selected: boolean;
onClickFaces: (images: string[], ctrl: boolean) => void;
onDelete: (name: string, ids: string[]) => void;
};
function FaceImage({
name,
image,
selected,
onClickFaces,
onDelete,
}: FaceImageProps) {
const { t } = useTranslation(["views/faceLibrary"]);
return (
<div
className={cn(
"flex cursor-pointer flex-col gap-2 rounded-lg bg-card outline outline-[3px]",
selected
? "shadow-selected outline-selected"
: "outline-transparent duration-500",
)}
onClick={(e) => {
e.stopPropagation();
onClickFaces([image], e.ctrlKey || e.metaKey);
}} }}
selected={selectedFaces.includes(image)}
i18nLibrary="views/faceLibrary"
onClick={(data, meta) => onClickFaces([data.filename], meta)}
> >
<div
className={cn(
"w-full overflow-hidden p-2 *:text-card-foreground",
isMobile && "flex justify-center",
)}
>
<img
className="h-40 rounded-lg"
src={`${baseUrl}clips/faces/${name}/${image}`}
/>
</div>
<div className="rounded-b-lg bg-card p-3">
<div className="flex w-full flex-row items-center justify-between gap-2">
<div className="flex flex-col items-start text-xs text-primary-variant">
<div className="smart-capitalize">{name}</div>
</div>
<div className="flex flex-row items-start justify-end gap-5 md:gap-4">
<Tooltip> <Tooltip>
<TooltipTrigger> <TooltipTrigger>
<LuTrash2 <LuTrash2
className="size-5 cursor-pointer text-primary-variant hover:text-primary" className="size-5 cursor-pointer text-primary-variant hover:text-primary"
onClick={(e) => { onClick={(e) => {
e.stopPropagation(); e.stopPropagation();
onDelete(name, [image]); onDelete(pageToggle, [image]);
}} }}
/> />
</TooltipTrigger> </TooltipTrigger>
<TooltipContent>{t("button.deleteFaceAttempts")}</TooltipContent> <TooltipContent>{t("button.deleteFaceAttempts")}</TooltipContent>
</Tooltip> </Tooltip>
</div> </ClassificationCard>
</div> ))}
</div>
</div> </div>
); );
} }

View File

@ -6,3 +6,17 @@ export type TrainFilter = {
min_score?: number; min_score?: number;
max_score?: number; max_score?: number;
}; };
// A single classification attempt/image rendered by the classification cards.
export type ClassificationItemData = {
  // path relative to the API base URL used as the image src
  filepath: string;
  // bare filename; used as the selection key and in train/reprocess calls
  filename: string;
  // recognized class name; "unknown" has special display handling
  name: string;
  // unix seconds, when encoded in the filename
  timestamp?: number;
  // id of the originating tracked-object event, when available
  eventId?: string;
  // confidence in [0, 1], when available
  score?: number;
};

// Score cutoffs used to color a card's confidence value.
export type ClassificationThreshold = {
  // scores at or above this are a confirmed match
  recognition: number;
  // scores at or above this (but below recognition) are a potential match
  unknown: number;
};

View File

@ -1,11 +1,3 @@
export type FaceLibraryData = { export type FaceLibraryData = {
[faceName: string]: string[]; [faceName: string]: string[];
}; };
export type RecognizedFaceData = {
filename: string;
timestamp: number;
eventId: string;
name: string;
score: number;
};

View File

@ -38,7 +38,11 @@ export default function ModelSelectionView({
return ( return (
<div className="flex size-full gap-2 p-2"> <div className="flex size-full gap-2 p-2">
{classificationConfigs.map((config) => ( {classificationConfigs.map((config) => (
<ModelCard config={config} onClick={() => onClick(config)} /> <ModelCard
key={config.name}
config={config}
onClick={() => onClick(config)}
/>
))} ))}
</div> </div>
); );

View File

@ -1,4 +1,3 @@
import { baseUrl } from "@/api/baseUrl";
import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog";
import { Button, buttonVariants } from "@/components/ui/button"; import { Button, buttonVariants } from "@/components/ui/button";
import { import {
@ -60,7 +59,16 @@ import { IoMdArrowRoundBack } from "react-icons/io";
import { MdAutoFixHigh } from "react-icons/md"; import { MdAutoFixHigh } from "react-icons/md";
import TrainFilterDialog from "@/components/overlay/dialog/TrainFilterDialog"; import TrainFilterDialog from "@/components/overlay/dialog/TrainFilterDialog";
import useApiFilter from "@/hooks/use-api-filter"; import useApiFilter from "@/hooks/use-api-filter";
import { TrainFilter } from "@/types/classification"; import { ClassificationItemData, TrainFilter } from "@/types/classification";
import {
ClassificationCard,
GroupedClassificationCard,
} from "@/components/card/ClassificationCard";
import { Event } from "@/types/event";
import SearchDetailDialog, {
SearchTab,
} from "@/components/overlay/detail/SearchDetailDialog";
import { SearchResult } from "@/types/search";
type ModelTrainingViewProps = { type ModelTrainingViewProps = {
model: CustomClassificationModelConfig; model: CustomClassificationModelConfig;
@ -626,35 +634,19 @@ function DatasetGrid({
className="scrollbar-container flex flex-wrap gap-2 overflow-y-auto p-2" className="scrollbar-container flex flex-wrap gap-2 overflow-y-auto p-2"
> >
{classData.map((image) => ( {classData.map((image) => (
<div <ClassificationCard
className={cn( key={image}
"flex w-60 cursor-pointer flex-col gap-2 rounded-lg bg-card outline outline-[3px]", className="w-60 gap-4 rounded-lg bg-card p-2"
selectedImages.includes(image) imgClassName="size-auto"
? "shadow-selected outline-selected" data={{
: "outline-transparent duration-500", filename: image,
)} filepath: `clips/${modelName}/dataset/${categoryName}/${image}`,
onClick={(e) => { name: "",
e.stopPropagation();
if (e.ctrlKey || e.metaKey) {
onClickImages([image], true);
}
}} }}
selected={selectedImages.includes(image)}
i18nLibrary="views/classificationModel"
onClick={(data, _) => onClickImages([data.filename], true)}
> >
<div
className={cn(
"w-full overflow-hidden p-2 *:text-card-foreground",
isMobile && "flex justify-center",
)}
>
<img
className="rounded-lg"
src={`${baseUrl}clips/${modelName}/dataset/${categoryName}/${image}`}
/>
</div>
<div className="rounded-b-lg bg-card p-3">
<div className="flex w-full flex-row items-center justify-between gap-2">
<div className="flex w-full flex-row items-start justify-end gap-5 md:gap-4">
<Tooltip> <Tooltip>
<TooltipTrigger> <TooltipTrigger>
<LuTrash2 <LuTrash2
@ -669,10 +661,7 @@ function DatasetGrid({
{t("button.deleteClassificationAttempts")} {t("button.deleteClassificationAttempts")}
</TooltipContent> </TooltipContent>
</Tooltip> </Tooltip>
</div> </ClassificationCard>
</div>
</div>
</div>
))} ))}
</div> </div>
); );
@ -700,20 +689,19 @@ function TrainGrid({
onRefresh, onRefresh,
onDelete, onDelete,
}: TrainGridProps) { }: TrainGridProps) {
const { t } = useTranslation(["views/classificationModel"]); const trainData = useMemo<ClassificationItemData[]>(
const trainData = useMemo(
() => () =>
trainImages trainImages
.map((raw) => { .map((raw) => {
const parts = raw.replaceAll(".webp", "").split("-"); const parts = raw.replaceAll(".webp", "").split("-");
const rawScore = Number.parseFloat(parts[2]); const rawScore = Number.parseFloat(parts[4]);
return { return {
raw, filename: raw,
timestamp: parts[0], filepath: `clips/${model.name}/train/${raw}`,
label: parts[1], timestamp: Number.parseFloat(parts[2]),
score: rawScore * 100, eventId: `${parts[0]}-${parts[1]}`,
truePositive: rawScore >= model.threshold, name: parts[3],
score: rawScore,
}; };
}) })
.filter((data) => { .filter((data) => {
@ -721,10 +709,7 @@ function TrainGrid({
return true; return true;
} }
if ( if (trainFilter.classes && !trainFilter.classes.includes(data.name)) {
trainFilter.classes &&
!trainFilter.classes.includes(data.label)
) {
return false; return false;
} }
@ -744,10 +729,68 @@ function TrainGrid({
return true; return true;
}) })
.sort((a, b) => b.timestamp.localeCompare(a.timestamp)), .sort((a, b) => b.timestamp - a.timestamp),
[model, trainImages, trainFilter], [model, trainImages, trainFilter],
); );
if (model.state_config) {
return (
<StateTrainGrid
model={model}
contentRef={contentRef}
classes={classes}
trainData={trainData}
selectedImages={selectedImages}
onClickImages={onClickImages}
onRefresh={onRefresh}
onDelete={onDelete}
/>
);
}
return (
<ObjectTrainGrid
model={model}
contentRef={contentRef}
classes={classes}
trainData={trainData}
selectedImages={selectedImages}
onClickImages={onClickImages}
onRefresh={onRefresh}
onDelete={onDelete}
/>
);
}
type StateTrainGridProps = {
model: CustomClassificationModelConfig;
contentRef: MutableRefObject<HTMLDivElement | null>;
classes: string[];
trainData?: ClassificationItemData[];
selectedImages: string[];
onClickImages: (images: string[], ctrl: boolean) => void;
onRefresh: () => void;
onDelete: (ids: string[]) => void;
};
// Training grid for state classification models: each captured training image
// is rendered as a selectable ClassificationCard with re-categorize and
// delete actions.
function StateTrainGrid({
  model,
  contentRef,
  classes,
  trainData,
  selectedImages,
  onClickImages,
  onRefresh,
  onDelete,
}: StateTrainGridProps) {
  const { t } = useTranslation(["views/classificationModel"]);

  // State models expose a single threshold; reuse it for both the
  // recognition and unknown cutoffs that ClassificationCard expects.
  const threshold = useMemo(
    () => ({
      recognition: model.threshold,
      unknown: model.threshold,
    }),
    [model],
  );

  // NOTE(review): the container class list below was reconstructed from the
  // matching grid in ObjectTrainGrid — confirm against the original markup.
  return (
    <div
      ref={contentRef}
      className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll p-1"
    >
      {trainData?.map((data) => (
        <ClassificationCard
          key={data.filename}
          className="w-60 gap-2 rounded-lg bg-card p-2"
          imgClassName="size-auto"
          data={data}
          threshold={threshold}
          selected={selectedImages.includes(data.filename)}
          i18nLibrary="views/classificationModel"
          showArea={false}
          onClick={(item, meta) => onClickImages([item.filename], meta)}
        >
          <ClassificationSelectionDialog
            classes={classes}
            modelName={model.name}
            image={data.filename}
            onRefresh={onRefresh}
          >
            <TbCategoryPlus className="size-5 cursor-pointer text-primary-variant hover:text-primary" />
          </ClassificationSelectionDialog>
          <Tooltip>
            <TooltipTrigger>
              <LuTrash2
                className="size-5 cursor-pointer text-primary-variant hover:text-primary"
                onClick={(e) => {
                  e.stopPropagation();
                  onDelete([data.filename]);
                }}
              />
            </TooltipTrigger>
            <TooltipContent>
              {t("button.deleteClassificationAttempts")}
            </TooltipContent>
          </Tooltip>
        </ClassificationCard>
      ))}
    </div>
  );
}
// Props for the object-classification training grid (cards grouped by event).
type ObjectTrainGridProps = {
  // Config of the custom classification model whose training images are shown.
  model: CustomClassificationModelConfig;
  // Ref to the scrollable grid container (used by the parent for scroll handling).
  contentRef: MutableRefObject<HTMLDivElement | null>;
  // All class labels the model knows; passed to the re-categorize dialog.
  classes: string[];
  // Parsed training-image entries to render; undefined while loading.
  trainData?: ClassificationItemData[];
  // Filenames of currently selected images.
  selectedImages: string[];
  // Toggle selection of the given image filenames; `ctrl` is the multi-select modifier.
  onClickImages: (images: string[], ctrl: boolean) => void;
  // Re-fetch training data after a categorize action.
  onRefresh: () => void;
  // Delete the given image filenames.
  onDelete: (ids: string[]) => void;
};
// Training grid for object classification models: training images are
// grouped by the event that produced them, each group rendered as a
// GroupedClassificationCard with re-categorize / delete actions and an
// event-detail dialog.
function ObjectTrainGrid({
  model,
  contentRef,
  classes,
  trainData,
  selectedImages,
  onClickImages,
  onRefresh,
  onDelete,
}: ObjectTrainGridProps) {
  const { t } = useTranslation(["views/classificationModel"]);

  // item data, grouped by event id in descending event-id order.
  const groups = useMemo(() => {
    const byEvent: { [eventId: string]: ClassificationItemData[] } = {};

    // Sort a shallow copy: Array.prototype.sort mutates in place, and
    // trainData is a prop owned by the parent's memoized array — mutating
    // it here was a React immutability bug.
    [...(trainData ?? [])]
      .sort((a, b) => a.eventId!.localeCompare(b.eventId!))
      .reverse()
      .forEach((data) => {
        if (byEvent[data.eventId!]) {
          byEvent[data.eventId!].push(data);
        } else {
          byEvent[data.eventId!] = [data];
        }
      });

    return byEvent;
  }, [trainData]);

  // Fetch the backing events for all groups in a single request. Missing
  // events are tolerated (the API skips unknown ids), so a group may render
  // without its event.
  const eventIdsQuery = useMemo(() => Object.keys(groups).join(","), [groups]);
  const { data: events } = useSWR<Event[]>([
    "event_ids",
    { ids: eventIdsQuery },
  ]);

  // Object models expose a single threshold; reuse it for both the
  // recognition and unknown cutoffs that the cards expect.
  const threshold = useMemo(() => {
    return {
      recognition: model.threshold,
      unknown: model.threshold,
    };
  }, [model]);

  // selection
  const [selectedEvent, setSelectedEvent] = useState<Event>();
  const [dialogTab, setDialogTab] = useState<SearchTab>("details");

  // handlers

  // Clicking a group either opens the event-detail dialog (no active
  // selection, no modifier key) or toggles selection for every image in
  // the group as a unit.
  const handleClickEvent = useCallback(
    (
      group: ClassificationItemData[],
      event: Event | undefined,
      meta: boolean,
    ) => {
      if (event && selectedImages.length == 0 && !meta) {
        setSelectedEvent(event);
      } else {
        const anySelected =
          group.find((item) => selectedImages.includes(item.filename)) !=
          undefined;

        if (anySelected) {
          // deselect all
          const toDeselect: string[] = [];
          group.forEach((item) => {
            if (selectedImages.includes(item.filename)) {
              toDeselect.push(item.filename);
            }
          });
          onClickImages(toDeselect, false);
        } else {
          // select all
          onClickImages(
            group.map((item) => item.filename),
            true,
          );
        }
      }
    },
    [selectedImages, onClickImages],
  );

  return (
    <>
      <SearchDetailDialog
        search={
          selectedEvent ? (selectedEvent as unknown as SearchResult) : undefined
        }
        page={dialogTab}
        setSimilarity={undefined}
        setSearchPage={setDialogTab}
        setSearch={(search) => setSelectedEvent(search as unknown as Event)}
        setInputFocused={() => {}}
      />
      <div
        ref={contentRef}
        className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll p-1"
      >
        {Object.entries(groups).map(([key, group]) => {
          const event = events?.find((ev) => ev.id == key);
          return (
            <GroupedClassificationCard
              key={key}
              group={group}
              event={event}
              threshold={threshold}
              selectedItems={selectedImages}
              i18nLibrary="views/classificationModel"
              objectType={model.object_config?.objects?.at(0) ?? "Object"}
              onClick={(data) => {
                if (data) {
                  onClickImages([data.filename], true);
                } else {
                  handleClickEvent(group, event, true);
                }
              }}
              onSelectEvent={() => {}}
            >
              {(data) => (
                <>
                  <ClassificationSelectionDialog
                    classes={classes}
                    modelName={model.name}
                    image={data.filename}
                    onRefresh={onRefresh}
                  >
                    <TbCategoryPlus className="size-5 cursor-pointer text-primary-variant hover:text-primary" />
                  </ClassificationSelectionDialog>
                  <Tooltip>
                    <TooltipTrigger>
                      <LuTrash2
                        className="size-5 cursor-pointer text-primary-variant hover:text-primary"
                        onClick={(e) => {
                          e.stopPropagation();
                          onDelete([data.filename]);
                        }}
                      />
                    </TooltipTrigger>
                    <TooltipContent>
                      {t("button.deleteClassificationAttempts")}
                    </TooltipContent>
                  </Tooltip>
                </>
              )}
            </GroupedClassificationCard>
          );
        })}
      </div>
    </>
  );
}