From d6f5d2b0fa8a91d5c654217e65cac5ff2c62d7fe Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Wed, 22 Oct 2025 07:36:09 -0600
Subject: [PATCH] Classification Model UI Refactor (#20602)

* Add cutoff for object classification
* Add selector for classification model type
* Improve model selection view
* Clean up design of classification card
* Tweaks
* Adjust button colors
* Improvements to gradients and making face library consistent
* Add basic classification model wizard
* Use relative coordinates
* Properly get resolution
* Clean up exports
* Cleanup
* Cleanup
* Update to use pre-defined component for image shadow
* Refactor image grouping
* Clean up mobile
* Clean up decision logic
* Remove max check on classification objects
* Increase default number of faces shown
* Cleanup
* Improve mobile layout
* Cleanup
* Update vocabulary
* Fix layout
* Fix page
* Cleanup
* Choose last item for unknown objects
* Move explore button
* Cleanup grid
* Cleanup classification
* Cleanup grid
* Cleanup
* Set transparency
* Set unknown
* Don't filter all configs
* Check length
---
 .../object_classification.md                  |   2 +-
 .../state_classification.md                   |   2 +-
 docs/docs/configuration/face_recognition.md   |  10 +-
 docs/docs/configuration/reference.md          |  14 +-
 frigate/config/classification.py              |   6 +-
 .../real_time/custom_classification.py        |   8 +-
 .../locales/en/config/face_recognition.json   |   2 +-
 .../locales/en/views/classificationModel.json |  10 +-
 web/public/locales/en/views/faceLibrary.json  |   6 +-
 .../components/card/ClassificationCard.tsx    | 435 +++++++++++-------
 web/src/components/card/ExportCard.tsx        |  10 +-
 .../ClassificationModelWizardDialog.tsx       |  66 +++
 .../overlay/ClassificationSelectionDialog.tsx |  20 +-
 .../overlay/FaceSelectionDialog.tsx           |   2 +-
 .../components/overlay/ImageShadowOverlay.tsx |  27 ++
 .../overlay/dialog/TrainFilterDialog.tsx      |   4 +-
 .../components/player/BirdseyeLivePlayer.tsx  |   4 +-
 web/src/components/player/LivePlayer.tsx      |   6 +-
 web/src/components/ui/dialog.tsx              |   2 +-
 web/src/pages/FaceLibrary.tsx                 | 114 ++---
 web/src/types/frigateConfig.ts                |   4 +-
 .../classification/ModelSelectionView.tsx     | 142 ++++--
 .../classification/ModelTrainingView.tsx      | 204 ++++----
 23 files changed, 666 insertions(+), 434 deletions(-)
 create mode 100644 web/src/components/classification/ClassificationModelWizardDialog.tsx
 create mode 100644 web/src/components/overlay/ImageShadowOverlay.tsx

diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md
index a75aae31a..9465716b7 100644
--- a/docs/docs/configuration/custom_classification/object_classification.md
+++ b/docs/docs/configuration/custom_classification/object_classification.md
@@ -67,7 +67,7 @@ When choosing which objects to classify, start with a small number of visually d
 ### Improving the Model
 
 - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
-- **Data collection**: Use the model’s Train tab to gather balanced examples across times of day, weather, and distances.
+- **Data collection**: Use the model’s Recent Classifications tab to gather balanced examples across times of day, weather, and distances.
 - **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered.
 - **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
 - **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.
diff --git a/docs/docs/configuration/custom_classification/state_classification.md b/docs/docs/configuration/custom_classification/state_classification.md
index ec38ea696..afc79eff8 100644
--- a/docs/docs/configuration/custom_classification/state_classification.md
+++ b/docs/docs/configuration/custom_classification/state_classification.md
@@ -49,4 +49,4 @@ When choosing a portion of the camera frame for state classification, it is impo
 ### Improving the Model
 
 - **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
-- **Data collection**: Use the model’s Train tab to gather balanced examples across times of day and weather.
+- **Data collection**: Use the model’s Recent Classifications tab to gather balanced examples across times of day and weather.
diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md
index d14946eaf..129669e7f 100644
--- a/docs/docs/configuration/face_recognition.md
+++ b/docs/docs/configuration/face_recognition.md
@@ -70,7 +70,7 @@ Fine-tune face recognition with these optional parameters at the global level of
 - `min_faces`: Min face recognitions for the sub label to be applied to the person object.
   - Default: `1`
 - `save_attempts`: Number of images of recognized faces to save for training.
-  - Default: `100`.
+  - Default: `200`.
 - `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
   - Default: `True`.
 - `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
@@ -114,9 +114,9 @@ When choosing images to include in the face training set it is recommended to al
 :::
 
-### Understanding the Train Tab
+### Understanding the Recent Recognitions Tab
 
-The Train tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
+The Recent Recognitions tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
 
 Each face image is labeled with a name (or `Unknown`) along with the confidence score of the recognition attempt. While each image can be used to train the system for a specific person, not all images are suitable for training.
 
@@ -140,7 +140,7 @@ Once front-facing images are performing well, start choosing slightly off-angle
 
 Start with the [Usage](#usage) section and re-read the [Model Requirements](#model-requirements) above.
 
-1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Train tab in the Frigate UI's Face Library.
+1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Recent Recognitions tab in the Frigate UI's Face Library.
 
 If you are using a Frigate+ or `face` detecting model:
 
@@ -186,7 +186,7 @@ Avoid training on images that already score highly, as this can lead to over-fit
 
 No, face recognition does not support negative training (i.e., explicitly telling it who someone is _not_). Instead, the best approach is to improve the training data by using a more diverse and representative set of images for each person.
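Taken together, the optional parameters documented above form a small `face_recognition` block in the global config. A minimal sketch using the defaults from this patch (illustrative only; any option not shown keeps its default, and `model_size: small` is assumed as the usual starting point):

```yaml
face_recognition:
  # minimum recognitions before the sub label is applied to the person object
  min_faces: 1
  # recognized-face images kept for the Recent Recognitions tab (new default in this patch)
  save_attempts: 200
  # down-weight blurry faces when scoring confidence
  blur_confidence_filter: True
  # model size used for face recognition
  model_size: small
```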
For more guidance, refer to the section above on improving recognition accuracy.
 
-### I see scores above the threshold in the train tab, but a sub label wasn't assigned?
+### I see scores above the threshold in the Recent Recognitions tab, but a sub label wasn't assigned?
 
 Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person if that person is confidently recognized consistently. This avoids cases where a single high confidence recognition would throw off the results.
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md
index 3d963a5bd..663192c06 100644
--- a/docs/docs/configuration/reference.md
+++ b/docs/docs/configuration/reference.md
@@ -630,7 +630,7 @@ face_recognition:
   # Optional: Min face recognitions for the sub label to be applied to the person object (default: shown below)
   min_faces: 1
   # Optional: Number of images of recognized faces to save for training (default: shown below)
-  save_attempts: 100
+  save_attempts: 200
   # Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
   blur_confidence_filter: True
   # Optional: Set the model size used for face recognition. (default: shown below)
@@ -671,20 +671,18 @@ lpr:
   # Optional: List of regex replacement rules to normalize detected plates (default: shown below)
   replace_rules: {}
 
-# Optional: Configuration for AI generated tracked object descriptions
+# Optional: Configuration for AI / LLM provider
 # WARNING: Depending on the provider, this will send thumbnails over the internet
-# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
-# the camera level (enabled: False) to enhance privacy for indoor cameras.
+# to Google or OpenAI's LLMs to generate descriptions. GenAI features can be configured at
+# the camera level to enhance privacy for indoor cameras.
 genai:
-  # Optional: Enable AI description generation (default: shown below)
-  enabled: False
-  # Required if enabled: Provider must be one of ollama, gemini, or openai
+  # Required: Provider must be one of ollama, gemini, or openai
   provider: ollama
   # Required if provider is ollama. May also be used for an OpenAI API compatible backend with the openai provider.
   base_url: http://localhost:11434
   # Required if gemini or openai
   api_key: "{FRIGATE_GENAI_API_KEY}"
-  # Required if enabled: The model to use with the provider.
+  # Required: The model to use with the provider.
   model: gemini-1.5-flash
   # Optional additional args to pass to the GenAI Provider (default: None)
   provider_options:
diff --git a/frigate/config/classification.py b/frigate/config/classification.py
index 56126e4d4..5b6cb8cec 100644
--- a/frigate/config/classification.py
+++ b/frigate/config/classification.py
@@ -69,7 +69,7 @@ class BirdClassificationConfig(FrigateBaseModel):
 
 class CustomClassificationStateCameraConfig(FrigateBaseModel):
-    crop: list[int, int, int, int] = Field(
+    crop: list[float, float, float, float] = Field(
         title="Crop of image frame on this camera to run classification on."
     )
 
@@ -197,7 +197,9 @@ class FaceRecognitionConfig(FrigateBaseModel):
         title="Min face recognitions for the sub label to be applied to the person object.",
     )
     save_attempts: int = Field(
-        default=100, ge=0, title="Number of face attempts to save in the train tab."
+        default=200,
+        ge=0,
+        title="Number of face attempts to save in the recent recognitions tab.",
     )
     blur_confidence_filter: bool = Field(
         default=True, title="Apply blur quality filter to face confidence."
diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py
index e5e4fc90e..1fb9dfc97 100644
--- a/frigate/data_processing/real_time/custom_classification.py
+++ b/frigate/data_processing/real_time/custom_classification.py
@@ -96,10 +96,10 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         camera_config = self.model_config.state_config.cameras[camera]
 
         crop = [
-            camera_config.crop[0],
-            camera_config.crop[1],
-            camera_config.crop[2],
-            camera_config.crop[3],
+            camera_config.crop[0] * self.config.cameras[camera].detect.width,
+            camera_config.crop[1] * self.config.cameras[camera].detect.height,
+            camera_config.crop[2] * self.config.cameras[camera].detect.width,
+            camera_config.crop[3] * self.config.cameras[camera].detect.height,
         ]
 
         should_run = False
diff --git a/web/public/locales/en/config/face_recognition.json b/web/public/locales/en/config/face_recognition.json
index ec6f8929b..705d75468 100644
--- a/web/public/locales/en/config/face_recognition.json
+++ b/web/public/locales/en/config/face_recognition.json
@@ -23,7 +23,7 @@
     "label": "Min face recognitions for the sub label to be applied to the person object."
   },
   "save_attempts": {
-    "label": "Number of face attempts to save in the train tab."
+    "label": "Number of face attempts to save in the recent recognitions tab."
   },
   "blur_confidence_filter": {
     "label": "Apply blur quality filter to face confidence."
diff --git a/web/public/locales/en/views/classificationModel.json b/web/public/locales/en/views/classificationModel.json
index 47b2b13bf..dcfc5a1b2 100644
--- a/web/public/locales/en/views/classificationModel.json
+++ b/web/public/locales/en/views/classificationModel.json
@@ -41,13 +41,17 @@
     "invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
   },
   "train": {
-    "title": "Train",
-    "aria": "Select Train"
+    "title": "Recent Classifications",
+    "aria": "Select Recent Classifications"
   },
   "categories": "Classes",
   "createCategory": {
     "new": "Create New Class"
   },
   "categorizeImageAs": "Classify Image As:",
-  "categorizeImage": "Classify Image"
+  "categorizeImage": "Classify Image",
+  "wizard": {
+    "title": "Create New Classification",
+    "description": "Create a new state or object classification model."
+  }
 }
diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json
index 3a0804511..6febf85f0 100644
--- a/web/public/locales/en/views/faceLibrary.json
+++ b/web/public/locales/en/views/faceLibrary.json
@@ -22,7 +22,7 @@
     "title": "Create Collection",
     "desc": "Create a new collection",
     "new": "Create New Face",
-    "nextSteps": "To build a strong foundation:
  • Use the Train tab to select and train on images for each detected person.
  • Focus on straight-on images for best results; avoid training images that capture faces at an angle.
  • " + "nextSteps": "To build a strong foundation:
  • Use the Recent Recognitions tab to select and train on images for each detected person.
  • Focus on straight-on images for best results; avoid training images that capture faces at an angle.
  • " }, "steps": { "faceName": "Enter Face Name", @@ -33,8 +33,8 @@ } }, "train": { - "title": "Train", - "aria": "Select train", + "title": "Recent Recognitions", + "aria": "Select recent recognitions", "empty": "There are no recent face recognition attempts" }, "selectItem": "Select {{item}}", diff --git a/web/src/components/card/ClassificationCard.tsx b/web/src/components/card/ClassificationCard.tsx index 5153b6d71..2fbe36804 100644 --- a/web/src/components/card/ClassificationCard.tsx +++ b/web/src/components/card/ClassificationCard.tsx @@ -6,7 +6,7 @@ import { ClassificationThreshold, } from "@/types/classification"; import { Event } from "@/types/event"; -import { useMemo, useRef, useState } from "react"; +import { forwardRef, useMemo, useRef, useState } from "react"; import { isDesktop, isMobile } from "react-device-detect"; import { useTranslation } from "react-i18next"; import TimeAgo from "../dynamic/TimeAgo"; @@ -14,7 +14,24 @@ import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"; import { LuSearch } from "react-icons/lu"; import { TooltipPortal } from "@radix-ui/react-tooltip"; import { useNavigate } from "react-router-dom"; -import { getTranslatedLabel } from "@/utils/i18n"; +import { HiSquare2Stack } from "react-icons/hi2"; +import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, + DialogTrigger, +} from "../ui/dialog"; +import { + MobilePage, + MobilePageContent, + MobilePageDescription, + MobilePageHeader, + MobilePageTitle, + MobilePageTrigger, +} from "../mobile/MobilePage"; type ClassificationCardProps = { className?: string; @@ -24,20 +41,28 @@ type ClassificationCardProps = { selected: boolean; i18nLibrary: string; showArea?: boolean; + count?: number; onClick: (data: ClassificationItemData, meta: boolean) => void; children?: React.ReactNode; }; -export function ClassificationCard({ - className, - imgClassName, - data, - threshold, - selected, - i18nLibrary, - showArea = true, - onClick, - children, -}: ClassificationCardProps) { +export const ClassificationCard = forwardRef< + HTMLDivElement, + ClassificationCardProps +>(function ClassificationCard( + { + className, + imgClassName, + data, + threshold, + selected, + i18nLibrary, + showArea = true, + count, + onClick, + children, + }, + ref, +) { const { t } = useTranslation([i18nLibrary]); const [imageLoaded, setImageLoaded] = useState(false); @@ -72,61 +97,81 @@ export function ClassificationCard({ }, [showArea, imageLoaded]); return ( - <> -
    { + const isMeta = e.metaKey || e.ctrlKey; + if (isMeta) { + e.stopPropagation(); + } + onClick(data, isMeta); + }} + onContextMenu={(e) => { + e.preventDefault(); + e.stopPropagation(); + onClick(data, true); + }} + > + -
    - setImageLoaded(true)} - className={cn("size-44", imgClassName, isMobile && "w-full")} - src={`${baseUrl}${data.filepath}`} - onClick={(e) => { - e.stopPropagation(); - onClick(data, e.metaKey || e.ctrlKey); - }} - /> - {imageArea != undefined && ( -
    - {t("information.pixels", { ns: "common", area: imageArea })} + onLoad={() => setImageLoaded(true)} + src={`${baseUrl}${data.filepath}`} + /> + + {count && ( +
    +
    {count}
    {" "} + +
    + )} + {!count && imageArea != undefined && ( +
    + {t("information.pixels", { ns: "common", area: imageArea })} +
    + )} +
    +
    +
    +
    + {data.name == "unknown" ? t("details.unknown") : data.name} +
    + {data.score && ( +
    + {Math.round(data.score * 100)}%
    )}
    -
    -
    -
    -
    - {data.name == "unknown" ? t("details.unknown") : data.name} -
    - {data.score && ( -
    - {Math.round(data.score * 100)}% -
    - )} -
    -
    - {children} -
    -
    +
    + {children}
    - +
    ); -} +}); type GroupedClassificationCardProps = { group: ClassificationItemData[]; @@ -136,7 +181,6 @@ type GroupedClassificationCardProps = { i18nLibrary: string; objectType: string; onClick: (data: ClassificationItemData | undefined) => void; - onSelectEvent: (event: Event) => void; children?: (data: ClassificationItemData) => React.ReactNode; }; export function GroupedClassificationCard({ @@ -145,20 +189,54 @@ export function GroupedClassificationCard({ threshold, selectedItems, i18nLibrary, - objectType, onClick, - onSelectEvent, children, }: GroupedClassificationCardProps) { const navigate = useNavigate(); const { t } = useTranslation(["views/explore", i18nLibrary]); + const [detailOpen, setDetailOpen] = useState(false); // data - const allItemsSelected = useMemo( - () => group.every((data) => selectedItems.includes(data.filename)), - [group, selectedItems], - ); + const bestItem = useMemo(() => { + let best: undefined | ClassificationItemData = undefined; + + group.forEach((item) => { + if (item?.name != undefined && item.name != "none") { + if ( + best?.score == undefined || + (item.score && best.score < item.score) + ) { + best = item; + } + } + }); + + if (!best) { + return group.at(-1); + } + + const bestTyped: ClassificationItemData = best; + return { + ...bestTyped, + name: event ? (event.sub_label ?? t("details.unknown")) : bestTyped.name, + score: event?.data?.sub_label_score || bestTyped.score, + }; + }, [group, event, t]); + + const bestScoreStatus = useMemo(() => { + if (!bestItem?.score || !threshold) { + return "unknown"; + } + + if (bestItem.score >= threshold.recognition) { + return "match"; + } else if (bestItem.score >= threshold.unknown) { + return "potential"; + } else { + return "unknown"; + } + }, [bestItem, threshold]); const time = useMemo(() => { const item = group[0]; @@ -170,94 +248,143 @@ export function GroupedClassificationCard({ return item.timestamp * 1000; }, [group]); - return ( -
    { - if (selectedItems.length) { - onClick(undefined); - } - }} - onContextMenu={(e) => { - e.stopPropagation(); - e.preventDefault(); - onClick(undefined); - }} - > -
    -
    -
    - {getTranslatedLabel(objectType)} - {event?.sub_label - ? `: ${event.sub_label} (${Math.round((event.data.sub_label_score || 0) * 100)}%)` - : ": " + t("details.unknown")} -
    - {time && ( - - )} -
    - {event && ( - - -
    { - navigate(`/explore?event_id=${event.id}`); - }} - > - -
    -
    - - - {t("details.item.button.viewInExplore", { - ns: "views/explore", - })} - - -
    - )} -
    + if (!bestItem) { + return null; + } -
    + { + if (meta || selectedItems.length > 0) { + onClick(undefined); + } else { + setDetailOpen(true); + } + }} + /> + { + if (!open) { + setDetailOpen(false); + } + }} > - {group.map((data: ClassificationItemData) => ( - { - if (meta || selectedItems.length > 0) { - onClick(data); - } else if (event) { - onSelectEvent(event); - } - }} - > - {children?.(data)} - - ))} -
    -
    + + e.preventDefault()} + > + <> +
    +
    + + {event?.sub_label ? event.sub_label : t("details.unknown")} + {event?.sub_label && ( +
    {`${Math.round((event.data.sub_label_score || 0) * 100)}%`}
    + )} +
    + + {time && ( + + )} + +
    + {isDesktop && ( +
    + {event && ( + + +
    { + navigate(`/explore?event_id=${event.id}`); + }} + > + +
    +
    + + + {t("details.item.button.viewInExplore", { + ns: "views/explore", + })} + + +
    + )} +
    + )} +
    +
    + {group.map((data: ClassificationItemData) => ( +
    + { + if (meta || selectedItems.length > 0) { + onClick(data); + } + }} + > + {children?.(data)} + +
    + ))} +
    + +
    + + ); } diff --git a/web/src/components/card/ExportCard.tsx b/web/src/components/card/ExportCard.tsx index cf0685caa..d57a30b52 100644 --- a/web/src/components/card/ExportCard.tsx +++ b/web/src/components/card/ExportCard.tsx @@ -21,6 +21,7 @@ import { baseUrl } from "@/api/baseUrl"; import { cn } from "@/lib/utils"; import { shareOrCopy } from "@/utils/browserUtil"; import { useTranslation } from "react-i18next"; +import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; type ExportProps = { className: string; @@ -145,7 +146,7 @@ export default function ExportCard({ <> {exportedRecording.thumb_path.length > 0 ? ( setLoading(false)} /> @@ -224,10 +225,9 @@ export default function ExportCard({ {loading && ( )} -
    -
    - {exportedRecording.name.replaceAll("_", " ")} -
    + +
    + {exportedRecording.name.replaceAll("_", " ")}
    diff --git a/web/src/components/classification/ClassificationModelWizardDialog.tsx b/web/src/components/classification/ClassificationModelWizardDialog.tsx new file mode 100644 index 000000000..621c9ea90 --- /dev/null +++ b/web/src/components/classification/ClassificationModelWizardDialog.tsx @@ -0,0 +1,66 @@ +import { useTranslation } from "react-i18next"; +import StepIndicator from "../indicators/StepIndicator"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "../ui/dialog"; +import { useState } from "react"; + +const STEPS = [ + "classificationWizard.steps.nameAndDefine", + "classificationWizard.steps.stateArea", + "classificationWizard.steps.chooseExamples", + "classificationWizard.steps.train", +]; + +type ClassificationModelWizardDialogProps = { + open: boolean; + onClose: () => void; +}; +export default function ClassificationModelWizardDialog({ + open, + onClose, +}: ClassificationModelWizardDialogProps) { + const { t } = useTranslation(["views/classificationModel"]); + + // step management + const [currentStep, _] = useState(0); + + return ( + { + if (!open) { + onClose; + } + }} + > + { + e.preventDefault(); + }} + > + + + {t("wizard.title")} + {currentStep === 0 && ( + {t("wizard.description")} + )} + + +
    +
    +
    +
    +
    + ); +} diff --git a/web/src/components/overlay/ClassificationSelectionDialog.tsx b/web/src/components/overlay/ClassificationSelectionDialog.tsx index f86ced19a..ca5057ee5 100644 --- a/web/src/components/overlay/ClassificationSelectionDialog.tsx +++ b/web/src/components/overlay/ClassificationSelectionDialog.tsx @@ -20,15 +20,14 @@ import { TooltipTrigger, } from "@/components/ui/tooltip"; import { isDesktop, isMobile } from "react-device-detect"; -import { LuPlus } from "react-icons/lu"; import { useTranslation } from "react-i18next"; import { cn } from "@/lib/utils"; import React, { ReactNode, useCallback, useMemo, useState } from "react"; import TextEntryDialog from "./dialog/TextEntryDialog"; import { Button } from "../ui/button"; -import { MdCategory } from "react-icons/md"; import axios from "axios"; import { toast } from "sonner"; +import { Separator } from "../ui/separator"; type ClassificationSelectionDialogProps = { className?: string; @@ -97,7 +96,7 @@ export default function ClassificationSelectionDialog({ ); return ( -
    +
    {newClass && ( - setNewClass(true)} - > - - {t("createCategory.new")} - {classes.sort().map((category) => ( onCategorizeImage(category)} > - {category.replaceAll("_", " ")} ))} + + setNewClass(true)} + > + {t("createCategory.new")} +
    diff --git a/web/src/components/overlay/FaceSelectionDialog.tsx b/web/src/components/overlay/FaceSelectionDialog.tsx index 3644ff1cf..174428a12 100644 --- a/web/src/components/overlay/FaceSelectionDialog.tsx +++ b/web/src/components/overlay/FaceSelectionDialog.tsx @@ -62,7 +62,7 @@ export default function FaceSelectionDialog({ ); return ( -
    +
    {newFace && ( +
    +
    + + ); +} diff --git a/web/src/components/overlay/dialog/TrainFilterDialog.tsx b/web/src/components/overlay/dialog/TrainFilterDialog.tsx index f4ccf41e1..982523ae9 100644 --- a/web/src/components/overlay/dialog/TrainFilterDialog.tsx +++ b/web/src/components/overlay/dialog/TrainFilterDialog.tsx @@ -60,7 +60,7 @@ export default function TrainFilterDialog({ moreFiltersSelected ? "text-white" : "text-secondary-foreground", )} /> - {isDesktop && t("more")} + {isDesktop && t("filter")} ); const content = ( @@ -122,7 +122,7 @@ export default function TrainFilterDialog({ return ( -
    -
    +
    {player}
    diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index f61e544eb..1f5ca703a 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -25,6 +25,7 @@ import { PlayerStats } from "./PlayerStats"; import { LuVideoOff } from "react-icons/lu"; import { Trans, useTranslation } from "react-i18next"; import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name"; +import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay"; type LivePlayerProps = { cameraRef?: (ref: HTMLDivElement | null) => void; @@ -328,10 +329,7 @@ export default function LivePlayer({ > {cameraEnabled && ((showStillWithoutActivity && !liveReady) || liveReady) && ( - <> -
    -
    - + )} {player} {cameraEnabled && diff --git a/web/src/components/ui/dialog.tsx b/web/src/components/ui/dialog.tsx index 761d815be..65a861012 100644 --- a/web/src/components/ui/dialog.tsx +++ b/web/src/components/ui/dialog.tsx @@ -107,7 +107,7 @@ const DialogContent = React.forwardRef< > {children} - + Close diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index fc8e73e3f..677bb58e0 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -63,10 +63,6 @@ import { } from "react-icons/lu"; import { toast } from "sonner"; import useSWR from "swr"; -import SearchDetailDialog, { - SearchTab, -} from "@/components/overlay/detail/SearchDetailDialog"; -import { SearchResult } from "@/types/search"; import { ClassificationCard, GroupedClassificationCard, @@ -686,11 +682,6 @@ function TrainingGrid({ { ids: eventIdsQuery }, ]); - // selection - - const [selectedEvent, setSelectedEvent] = useState(); - const [dialogTab, setDialogTab] = useState("details"); - if (attemptImages.length == 0) { return (
    @@ -701,40 +692,29 @@ function TrainingGrid({ } return ( - <> - setSelectedEvent(search as unknown as Event)} - setInputFocused={() => {}} - /> - -
    - {Object.entries(faceGroups).map(([key, group]) => { - const event = events?.find((ev) => ev.id == key); - return ( +
    + {Object.entries(faceGroups).map(([key, group]) => { + const event = events?.find((ev) => ev.id == key); + return ( +
    - ); - })} -
    - +
    + ); + })} +
    ); } @@ -745,7 +725,6 @@ type FaceAttemptGroupProps = { faceNames: string[]; selectedFaces: string[]; onClickFaces: (image: string[], ctrl: boolean) => void; - onSelectEvent: (event: Event) => void; onRefresh: () => void; }; function FaceAttemptGroup({ @@ -755,7 +734,6 @@ function FaceAttemptGroup({ faceNames, selectedFaces, onClickFaces, - onSelectEvent, onRefresh, }: FaceAttemptGroupProps) { const { t } = useTranslation(["views/faceLibrary", "views/explore"]); @@ -773,8 +751,8 @@ function FaceAttemptGroup({ const handleClickEvent = useCallback( (meta: boolean) => { - if (event && selectedFaces.length == 0 && !meta) { - onSelectEvent(event); + if (!meta) { + return; } else { const anySelected = group.find((face) => selectedFaces.includes(face.filename)) != @@ -798,7 +776,7 @@ function FaceAttemptGroup({ } } }, - [event, group, selectedFaces, onClickFaces, onSelectEvent], + [group, selectedFaces, onClickFaces], ); // api calls @@ -873,7 +851,6 @@ function FaceAttemptGroup({ handleClickEvent(true); } }} - onSelectEvent={onSelectEvent} > {(data) => ( <> @@ -881,12 +858,12 @@ function FaceAttemptGroup({ faceNames={faceNames} onTrainAttempt={(name) => onTrainAttempt(data, name)} > - + onReprocess(data)} /> @@ -934,36 +911,35 @@ function FaceGrid({
    {sortedFaces.map((image: string) => ( - onClickFaces([data.filename], meta)} - > - - - { - e.stopPropagation(); - onDelete(pageToggle, [image]); - }} - /> - - {t("button.deleteFaceAttempts")} - - +
    + onClickFaces([data.filename], meta)} + > + + + { + e.stopPropagation(); + onDelete(pageToggle, [image]); + }} + /> + + {t("button.deleteFaceAttempts")} + + +
    ))}
    ); diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index f82ca9838..ffe4cc14d 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -304,10 +304,10 @@ export type CustomClassificationModelConfig = { enabled: boolean; name: string; threshold: number; - object_config: null | { + object_config?: { objects: string[]; }; - state_config: null | { + state_config?: { cameras: { [cameraName: string]: { crop: [number, number, number, number]; diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx index b91ea5648..6d6287b4d 100644 --- a/web/src/views/classification/ModelSelectionView.tsx +++ b/web/src/views/classification/ModelSelectionView.tsx @@ -1,24 +1,39 @@ import { baseUrl } from "@/api/baseUrl"; +import ClassificationModelWizardDialog from "@/components/classification/ClassificationModelWizardDialog"; import ActivityIndicator from "@/components/indicators/activity-indicator"; +import { ImageShadowOverlay } from "@/components/overlay/ImageShadowOverlay"; +import { Button } from "@/components/ui/button"; +import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group"; +import useOptimisticState from "@/hooks/use-optimistic-state"; import { cn } from "@/lib/utils"; import { CustomClassificationModelConfig, FrigateConfig, } from "@/types/frigateConfig"; -import { useMemo } from "react"; +import { useMemo, useState } from "react"; import { isMobile } from "react-device-detect"; +import { useTranslation } from "react-i18next"; +import { FaFolderPlus } from "react-icons/fa"; import useSWR from "swr"; +const allModelTypes = ["objects", "states"] as const; +type ModelType = (typeof allModelTypes)[number]; + type ModelSelectionViewProps = { onClick: (model: CustomClassificationModelConfig) => void; }; export default function ModelSelectionView({ onClick, }: ModelSelectionViewProps) { + const { t } = useTranslation(["views/classificationModel"]); + const [page, setPage] = useState("objects"); + const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100); const { data: config } = useSWR("config", { revalidateOnFocus: false, }); + // data + const classificationConfigs = useMemo(() => { if (!config) { return []; @@ -27,6 +42,24 @@ export default function ModelSelectionView({ return Object.values(config.classification.custom); }, [config]); + const selectedClassificationConfigs = useMemo(() => { + return classificationConfigs.filter((model) => { + if (pageToggle == "objects" && model.object_config != undefined) { + return true; + } + + if (pageToggle == "states" && model.state_config != undefined) { + return true; + } + + return false; + }); + }, [classificationConfigs, pageToggle]); + + // new model wizard + + const [newModel, setNewModel] = useState(false); + if (!config) { return ; } @@ -36,14 +69,62 @@ export default function ModelSelectionView({ } return ( -
    - {classificationConfigs.map((config) => ( - onClick(config)} - /> - ))} +
    + setNewModel(false)} + /> + +
    +
    + { + if (value) { + // Restrict viewer navigation + setPageToggle(value); + } + }} + > + {allModelTypes.map((item) => ( + +
    {t("menu." + item)}
    +
    + ))} +
    +
    +
    + +
    +
    +
    + {selectedClassificationConfigs.map((config) => ( + onClick(config)} + /> + ))} +
    ); } @@ -57,46 +138,37 @@ function ModelCard({ config, onClick }: ModelCardProps) { [id: string]: string[]; }>(`classification/${config.name}/dataset`, { revalidateOnFocus: false }); - const coverImages = useMemo(() => { - if (!dataset) { - return {}; + const coverImage = useMemo(() => { + if (!dataset?.length) { + return undefined; } - const imageMap: { [key: string]: string } = {}; + const keys = Object.keys(dataset).filter((key) => key != "none"); + const selectedKey = keys[0]; - for (const [key, imageList] of Object.entries(dataset)) { - if (imageList.length > 0) { - imageMap[key] = imageList[0]; - } - } - - return imageMap; + return { + name: selectedKey, + img: dataset[selectedKey][0], + }; }, [dataset]); return (
    onClick()} > -
    - {Object.entries(coverImages).map(([key, image]) => ( - - ))} -
    -
    - {config.name} ({config.state_config != null ? "State" : "Object"}{" "} - Classification) + + +
    + {config.name}
    ); diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx index d3fd5b40e..6f9e479c2 100644 --- a/web/src/views/classification/ModelTrainingView.tsx +++ b/web/src/views/classification/ModelTrainingView.tsx @@ -44,7 +44,7 @@ import { useRef, useState, } from "react"; -import { isDesktop, isMobile } from "react-device-detect"; +import { isDesktop } from "react-device-detect"; import { Trans, useTranslation } from "react-i18next"; import { LuPencil, LuTrash2 } from "react-icons/lu"; import { toast } from "sonner"; @@ -56,7 +56,6 @@ import { ModelState } from "@/types/ws"; import ActivityIndicator from "@/components/indicators/activity-indicator"; import { useNavigate } from "react-router-dom"; import { IoMdArrowRoundBack } from "react-icons/io"; -import { MdAutoFixHigh } from "react-icons/md"; import TrainFilterDialog from "@/components/overlay/dialog/TrainFilterDialog"; import useApiFilter from "@/hooks/use-api-filter"; import { ClassificationItemData, TrainFilter } from "@/types/classification"; @@ -69,6 +68,7 @@ import SearchDetailDialog, { SearchTab, } from "@/components/overlay/detail/SearchDetailDialog"; import { SearchResult } from "@/types/search"; +import { HiSparkles } from "react-icons/hi"; type ModelTrainingViewProps = { model: CustomClassificationModelConfig; @@ -378,12 +378,13 @@ export default function ModelTrainingView({ model }: ModelTrainingViewProps) { @@ -631,37 +632,36 @@ function DatasetGrid({ return (
    {classData.map((image) => ( - onClickImages([data.filename], true)} - > - - - { - e.stopPropagation(); - onDelete([image]); - }} - /> - - - {t("button.deleteClassificationAttempts")} - - - +
    + onClickImages([data.filename], true)} + > + + + { + e.stopPropagation(); + onDelete([image]); + }} + /> + + + {t("button.deleteClassificationAttempts")} + + + +
    ))}
    ); @@ -757,7 +757,6 @@ function TrainGrid({ selectedImages={selectedImages} onClickImages={onClickImages} onRefresh={onRefresh} - onDelete={onDelete} /> ); } @@ -780,10 +779,7 @@ function StateTrainGrid({ selectedImages, onClickImages, onRefresh, - onDelete, }: StateTrainGridProps) { - const { t } = useTranslation(["views/classificationModel"]); - const threshold = useMemo(() => { return { recognition: model.threshold, @@ -795,45 +791,29 @@ function StateTrainGrid({
    {trainData?.map((data) => ( - onClickImages([data.filename], meta)} - > - + onClickImages([data.filename], meta)} > - - - - - { - e.stopPropagation(); - onDelete([data.filename]); - }} - /> - - - {t("button.deleteClassificationAttempts")} - - - + + + + +
    ))}
    ); @@ -847,7 +827,6 @@ type ObjectTrainGridProps = { selectedImages: string[]; onClickImages: (images: string[], ctrl: boolean) => void; onRefresh: () => void; - onDelete: (ids: string[]) => void; }; function ObjectTrainGrid({ model, @@ -857,10 +836,7 @@ function ObjectTrainGrid({ selectedImages, onClickImages, onRefresh, - onDelete, }: ObjectTrainGridProps) { - const { t } = useTranslation(["views/classificationModel"]); - // item data const groups = useMemo(() => { @@ -950,55 +926,43 @@ function ObjectTrainGrid({
    {Object.entries(groups).map(([key, group]) => { const event = events?.find((ev) => ev.id == key); return ( - { - if (data) { - onClickImages([data.filename], true); - } else { - handleClickEvent(group, event, true); - } - }} - onSelectEvent={() => {}} - > - {(data) => ( - <> - - - - - - { - e.stopPropagation(); - onDelete([data.filename]); - }} - /> - - - {t("button.deleteClassificationAttempts")} - - - - )} - +
    + { + if (data) { + onClickImages([data.filename], true); + } else { + handleClickEvent(group, event, true); + } + }} + > + {(data) => ( + <> + + + + + )} + +
    ); })}
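
A closing example may help when upgrading: the change to `frigate/config/classification.py` above switches `crop` from absolute pixels to relative coordinates, and `custom_classification.py` now scales those values by each camera's detect width and height at runtime. A hedged sketch of what a post-patch state classification config might look like (the model and camera names are hypothetical; the layout follows the `classification.custom` structure from `frigateConfig.ts`):

```yaml
classification:
  custom:
    garage_door: # hypothetical model name
      # per-model score cutoff; the docs above suggest starting near 0.8
      threshold: 0.8
      state_config:
        cameras:
          garage_cam: # hypothetical camera name
            # Relative coordinates in [0, 1] after this patch; at runtime they are
            # multiplied by the camera's detect width/height. For example, a
            # pre-patch pixel crop of [0, 0, 640, 360] on a 1280x720 detect
            # stream becomes:
            crop: [0.0, 0.0, 0.5, 0.5]
```

Existing configs that used pixel values would need to be converted to fractions of the detect resolution when adopting this change.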