diff --git a/docker/tensorrt/requirements-arm64.txt b/docker/tensorrt/requirements-arm64.txt
index c9b618180..78d659746 100644
--- a/docker/tensorrt/requirements-arm64.txt
+++ b/docker/tensorrt/requirements-arm64.txt
@@ -1 +1,2 @@
 cuda-python == 12.6.*; platform_machine == 'aarch64'
+numpy == 1.26.*; platform_machine == 'aarch64'
diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py
index 6148e8c05..2c6b02103 100644
--- a/frigate/data_processing/real_time/face.py
+++ b/frigate/data_processing/real_time/face.py
@@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         camera = obj_data["camera"]

         if not self.config.cameras[camera].face_recognition.enabled:
+            logger.debug(f"Face recognition disabled for camera {camera}, skipping")
             return

         start = datetime.datetime.now().timestamp()
@@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             person_box = obj_data.get("box")

             if not person_box:
+                logger.debug(f"No person box available for {id}")
                 return

             rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
@@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

             try:
                 face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
-            except Exception:
+            except Exception as e:
+                logger.debug(f"Failed to convert face frame color for {id}: {e}")
                 return
         else:
             # don't run for object without attributes
@@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

             # no faces detected in this frame
             if not face:
+                logger.debug(f"No face attributes found for {id}")
                 return

             face_box = face.get("box")
@@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         res = self.recognizer.classify(face_frame)

         if not res:
+            logger.debug(f"Face recognizer returned no result for {id}")
             self.__update_metrics(datetime.datetime.now().timestamp() - start)
             return

diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index 4ab8132c1..d169d2d88 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread):
         self.realtime_processors: list[RealTimeProcessorApi] = []

         if self.config.face_recognition.enabled:
+            logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
             self.realtime_processors.append(
                 FaceRealTimeProcessor(
                     self.config, self.requestor, self.event_metadata_publisher, metrics
                 )
             )
+            logger.debug("FaceRealTimeProcessor initialized successfully")

         if self.config.classification.bird.enabled:
             self.realtime_processors.append(
diff --git a/web/src/views/classification/ModelSelectionView.tsx b/web/src/views/classification/ModelSelectionView.tsx
index b353be65f..b1b462497 100644
--- a/web/src/views/classification/ModelSelectionView.tsx
+++ b/web/src/views/classification/ModelSelectionView.tsx
@@ -214,7 +214,7 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {

   const handleDelete = useCallback(async () => {
     try {
-      // First, remove from config to stop the processor
+      await axios.delete(`classification/${config.name}`);
       await axios.put("/config/set", {
         requires_restart: 0,
         update_topic: `config/classification/custom/${config.name}`,
@@ -227,9 +227,6 @@ function ModelCard({ config, onClick, onDelete }: ModelCardProps) {
         },
       });

-      // Then, delete the model data and files
-      await axios.delete(`classification/${config.name}`);
-
       toast.success(t("toast.success.deletedModel", { count: 1 }), {
         position: "top-center",
       });