diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py index 5324105d4..9b479a527 100644 --- a/frigate/data_processing/real_time/face.py +++ b/frigate/data_processing/real_time/face.py @@ -229,7 +229,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi): face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) face_frame = face_frame[ - max(0, face_box[1]) : min(frame.shape[0], face_box[3]),F + max(0, face_box[1]) : min(frame.shape[0], face_box[3]), max(0, face_box[0]) : min(frame.shape[1], face_box[2]), ] diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index 0ac937283..51ab7b610 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -33,7 +33,7 @@ import useKeyboardListener from "@/hooks/use-keyboard-listener"; import useOptimisticState from "@/hooks/use-optimistic-state"; import { cn } from "@/lib/utils"; import { FaceLibraryData, RecognizedFaceData } from "@/types/face"; -import { FrigateConfig } from "@/types/frigateConfig"; +import { FaceRecognitionConfig, FrigateConfig } from "@/types/frigateConfig"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isMobile } from "react-device-detect"; @@ -451,7 +451,7 @@ function TrainingGrid({ key={image} image={image} faceNames={faceNames} - threshold={config.face_recognition.recognition_threshold} + recognitionConfig={config.face_recognition} selected={selectedFaces.includes(image)} onClick={(data, meta) => { if (meta) { @@ -471,7 +471,7 @@ function TrainingGrid({ type FaceAttemptProps = { image: string; faceNames: string[]; - threshold: number; + recognitionConfig: FaceRecognitionConfig; selected: boolean; onClick: (data: RecognizedFaceData, meta: boolean) => void; onRefresh: () => void; @@ -479,7 +479,7 @@ type FaceAttemptProps = { function FaceAttempt({ image, faceNames, - threshold, + recognitionConfig, selected, onClick, onRefresh, @@ -496,6 
+496,16 @@ function FaceAttempt({ }; }, [image]); + const scoreStatus = useMemo(() => { + if (data.score >= recognitionConfig.recognition_threshold) { + return "match"; + } else if (data.score >= recognitionConfig.unknown_score) { + return "potential"; + } else { + return "unknown"; + } + }, [data, recognitionConfig]); + // interaction const imgRef = useRef(null); @@ -579,10 +589,13 @@ function FaceAttempt({
{data.name}
= threshold ? "text-success" : "text-danger", + "", + scoreStatus == "match" && "text-success", + scoreStatus == "potential" && "text-orange-400", + scoreStatus == "unknown" && "text-danger", )} > - {data.score * 100}% + {Math.round(data.score * 100)}%
diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 5312bed8c..d66d5edcb 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -20,6 +20,14 @@ export interface BirdseyeConfig { width: number; } +export interface FaceRecognitionConfig { + enabled: boolean; + model_size: SearchModelSize; + unknown_score: number; + detection_threshold: number; + recognition_threshold: number; +} + export type SearchModel = "jinav1" | "jinav2"; export type SearchModelSize = "small" | "large"; @@ -331,12 +339,7 @@ export interface FrigateConfig { environment_vars: Record; - face_recognition: { - enabled: boolean; - model_size: SearchModelSize; - detection_threshold: number; - recognition_threshold: number; - }; + face_recognition: FaceRecognitionConfig; ffmpeg: { global_args: string[];