diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index 7375cf40e..8fd58c168 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -221,6 +221,9 @@ class GenericONNXEmbedding:
             # copy img image into center of result image
             frame[y_center : y_center + og_h, x_center : x_center + og_w] = og
 
+            # standardize pixel values across channels
+            mean, std = frame.mean(), frame.std()
+            frame = (frame - mean) / std
             frame = np.expand_dims(frame, axis=0)
             return [{"input_2": frame}]
         elif self.model_type == ModelTypeEnum.lpr_detect:
diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index 2b3489a4c..feafcaa7d 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -406,7 +406,7 @@ class EmbeddingMaintainer(threading.Thread):
                 logger.debug("Detected no faces for person object.")
                 return
 
-            margin = int((face_box[2] - face_box[0]) * 0.4)
+            margin = int((face_box[2] - face_box[0]) * 0.25)
             face_frame = person[
                 max(0, face_box[1] - margin) : min(
                     frame.shape[0], face_box[3] + margin
@@ -442,7 +442,7 @@ class EmbeddingMaintainer(threading.Thread):
                 return
 
             face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
-            margin = int((face_box[2] - face_box[0]) * 0.4)
+            margin = int((face_box[2] - face_box[0]) * 0.25)
             face_frame = face_frame[
                 max(0, face_box[1] - margin) : min(
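
Note on the onnx.py hunk: the three added lines apply whole-frame standardization (zero mean, unit variance) to the padded face crop before the batch dimension is added. Below is a minimal standalone sketch of just that step; the frame shape and dtype are assumptions chosen for illustration and are not taken from the Frigate code.

import numpy as np

# illustrative stand-in for the padded face crop; shape and dtype are assumptions
frame = np.random.randint(0, 256, (160, 160, 3)).astype(np.float32)

# same transform as the added lines: subtract the global mean and divide by the
# global standard deviation so the embedding model sees zero-mean, unit-variance input
mean, std = frame.mean(), frame.std()
frame = (frame - mean) / std

print(frame.mean(), frame.std())  # approximately 0.0 and 1.0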