Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-02-18 09:04:28 +03:00)
Support downloading face models

parent 160664836f
commit 2c182d59f0
@@ -123,19 +123,6 @@ class Embeddings:
             device="GPU" if config.semantic_search.model_size == "large" else "CPU",
         )
 
-        if self.config.face_recognition.enabled:
-            self.face_embedding = GenericONNXEmbedding(
-                model_name="facedet",
-                model_file="facedet.onnx",
-                download_urls={
-                    "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
-                    "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
-                },
-                model_size="small",
-                model_type=ModelTypeEnum.face,
-                requestor=self.requestor,
-            )
-
         self.lpr_detection_model = None
         self.lpr_classification_model = None
         self.lpr_recognition_model = None
@@ -100,19 +100,6 @@ class EmbeddingMaintainer(threading.Thread):
             self.lpr_config, self.requestor, self.embeddings
         )
 
-    @property
-    def face_detector(self) -> cv2.FaceDetectorYN:
-        # Lazily create the classifier.
-        if "face_detector" not in self.__dict__:
-            self.__dict__["face_detector"] = cv2.FaceDetectorYN.create(
-                "/config/model_cache/facedet/facedet.onnx",
-                config="",
-                input_size=(320, 320),
-                score_threshold=0.8,
-                nms_threshold=0.3,
-            )
-        return self.__dict__["face_detector"]
-
     def run(self) -> None:
         """Maintain a SQLite-vec database for semantic search."""
         while not self.stop_event.is_set():
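The property removed above implements lazy initialization by caching the detector in `self.__dict__` on first access. A minimal sketch of that pattern, using a hypothetical `ExpensiveDetector` stand-in and showing the `functools.cached_property` equivalent:

import functools


class ExpensiveDetector:
    """Hypothetical stand-in for an object that is slow to construct."""


class LazyViaDict:
    @property
    def detector(self) -> ExpensiveDetector:
        # Build once, then reuse the instance stored in __dict__ on later calls.
        if "detector" not in self.__dict__:
            self.__dict__["detector"] = ExpensiveDetector()
        return self.__dict__["detector"]


class LazyViaCachedProperty:
    @functools.cached_property
    def detector(self) -> ExpensiveDetector:
        # cached_property also stores its result in the instance __dict__.
        return ExpensiveDetector()
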
@@ -395,10 +382,9 @@ class EmbeddingMaintainer(threading.Thread):
 
     def _detect_face(self, input: np.ndarray) -> tuple[int, int, int, int]:
         """Detect faces in input image."""
-        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
-        faces = self.face_detector.detect(input)
+        faces = self.face_classifier.detect_faces(input)
 
-        if faces[1] is None:
+        if faces is None or faces[1] is None:
             return None
 
         face = None
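Context for the `faces is None or faces[1] is None` guard: `cv2.FaceDetectorYN.detect` returns a `(retval, faces)` tuple where `faces` is `None` when nothing is found, and otherwise holds one row per detection (bounding box, five landmark points, confidence score). A small sketch assuming a local copy of `facedet.onnx` and any BGR frame; the thresholds mirror the values in this diff, the paths are illustrative:

import cv2
import numpy as np

# Illustrative path; Frigate stores the model under its model cache directory.
detector = cv2.FaceDetectorYN.create(
    "facedet.onnx",
    config="",
    input_size=(320, 320),
    score_threshold=0.8,
    nms_threshold=0.3,
)

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a camera frame
detector.setInputSize((frame.shape[1], frame.shape[0]))  # (width, height)

retval, faces = detector.detect(frame)
if faces is None:
    print("no faces detected")
else:
    for row in faces:
        x, y, w, h = row[:4].astype(int)  # bounding box
        score = row[-1]                   # detection confidence
        print(f"face at ({x}, {y}) size {w}x{h} score {score:.2f}")
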
@@ -2,7 +2,7 @@
 
 import logging
 import os
-from typing import Any, Optional
+from typing import Any
 
 import cv2
 import numpy as np
@@ -10,6 +10,8 @@ import onnxruntime as ort
 from playhouse.sqliteq import SqliteQueueDatabase
 
 from frigate.config.semantic_search import FaceRecognitionConfig
+from frigate.const import MODEL_CACHE_DIR
+from frigate.util.downloader import ModelDownloader
 
 try:
     import openvino as ov
@@ -162,21 +164,50 @@ class FaceClassificationModel:
     def __init__(self, config: FaceRecognitionConfig, db: SqliteQueueDatabase):
         self.config = config
         self.db = db
-        self.landmark_detector = cv2.face.createFacemarkLBF()
-        if os.path.isfile("/config/model_cache/facedet/landmarkdet.yaml"):
-            self.landmark_detector.loadModel(
-                "/config/model_cache/facedet/landmarkdet.yaml"
-            )
+        self.face_detector: cv2.FaceDetectorYN = None
+        self.landmark_detector: cv2.face.FacemarkLBF = None
+        self.face_recognizer: cv2.face.LBPHFaceRecognizer = None
+
+        download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
+        model_files = ["facedet.onnx", "landmarkdet.yaml"]
+        self.model_urls = {
+            "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
+            "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
+        }
 
-        self.recognizer: cv2.face.LBPHFaceRecognizer = (
-            cv2.face.LBPHFaceRecognizer_create(
-                radius=2, threshold=(1 - config.min_score) * 1000
-            )
-        )
+        if not all(os.path.exists(os.path.join(download_path, n)) for n in model_files):
+            logger.debug("starting model download for facedet")
+            self.downloader = ModelDownloader(
+                model_name="facedet.onnx",
+                download_path=download_path,
+                file_names=model_files,
+                download_func=self.__download_models,
+            )
+            self.downloader.ensure_model_files()
+        else:
+            self.__build_detector()
 
         self.label_map: dict[int, str] = {}
         self.__build_classifier()
 
+    def __download_models(self, path: str) -> None:
+        try:
+            file_name = os.path.basename(path)
+            ModelDownloader.download_from_url(self.model_urls[file_name], path)
+        except Exception:
+            pass
+
+    def __build_detector(self) -> None:
+        self.face_detector = cv2.FaceDetectorYN.create(
+            "/config/model_cache/facedet/facedet.onnx",
+            config="",
+            input_size=(320, 320),
+            score_threshold=0.8,
+            nms_threshold=0.3,
+        )
+        self.landmark_detector = cv2.face.createFacemarkLBF()
+        self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
+
     def __build_classifier(self) -> None:
         labels = []
         faces = []
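The constructor above only kicks off a download when one of the expected files is missing from the model cache, and otherwise builds the detector immediately. A minimal standalone sketch of that check-then-fetch flow, using plain `urllib` instead of Frigate's `ModelDownloader` helper; the cache path and function name are assumptions for illustration:

import os
import urllib.request

# Assumed cache location for illustration; Frigate derives this from MODEL_CACHE_DIR.
DOWNLOAD_PATH = "/config/model_cache/facedet"

MODEL_URLS = {
    "facedet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facedet.onnx",
    "landmarkdet.yaml": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/landmarkdet.yaml",
}


def ensure_face_models(download_path: str = DOWNLOAD_PATH) -> None:
    """Download any face model files that are not already cached."""
    os.makedirs(download_path, exist_ok=True)

    for file_name, url in MODEL_URLS.items():
        target = os.path.join(download_path, file_name)

        if os.path.exists(target):
            continue  # already cached

        # Fetch to a temporary name so a partial download is never
        # mistaken for a complete model file.
        tmp = target + ".part"
        urllib.request.urlretrieve(url, tmp)
        os.replace(tmp, target)


if __name__ == "__main__":
    ensure_face_models()
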
@@ -203,6 +234,11 @@ class FaceClassificationModel:
             faces.append(img)
             labels.append(idx)
 
+        self.recognizer: cv2.face.LBPHFaceRecognizer = (
+            cv2.face.LBPHFaceRecognizer_create(
+                radius=2, threshold=(1 - self.config.min_score) * 1000
+            )
+        )
         self.recognizer.train(faces, np.array(labels))
 
     def __align_face(
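On the recognizer built above: LBPH reports a distance where lower means a closer match, so `threshold=(1 - self.config.min_score) * 1000` translates the configured minimum score onto that distance scale and rejects weaker matches. A short sketch of training and prediction, with synthetic grayscale crops standing in for aligned face images (requires `opencv-contrib-python` for the `cv2.face` module); the `min_score` value is an assumption:

import cv2
import numpy as np

min_score = 0.8  # assumed config value for illustration
recognizer = cv2.face.LBPHFaceRecognizer_create(
    radius=2,
    threshold=(1 - min_score) * 1000,  # distance cutoff; lower distance = better match
)

# Synthetic stand-ins for aligned grayscale face crops.
rng = np.random.default_rng(0)
faces = [rng.integers(0, 255, (96, 96), dtype=np.uint8) for _ in range(4)]
labels = [0, 0, 1, 1]

recognizer.train(faces, np.array(labels, dtype=np.int32))

label, distance = recognizer.predict(faces[0])
score = 1.0 - (distance / 1000)  # map the distance back onto a score-like value
print(f"label={label} distance={distance:.1f} score~={score:.2f}")
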
@@ -267,7 +303,17 @@ class FaceClassificationModel:
         self.labeler = None
         self.label_map = {}
 
-    def classify_face(self, face_image: np.ndarray) -> Optional[tuple[str, float]]:
+    def detect_faces(self, input: np.ndarray) -> tuple[int, cv2.typing.MatLike] | None:
+        if not self.face_detector:
+            return None
+
+        self.face_detector.setInputSize((input.shape[1], input.shape[0]))
+        return self.face_detector.detect(input)
+
+    def classify_face(self, face_image: np.ndarray) -> tuple[str, float] | None:
+        if not self.landmark_detector:
+            return None
+
         if not self.label_map:
             self.__build_classifier()
 