Fix arm64 being unable to optimize ONNX models

This commit is contained in:
Nicolas Mowen 2025-10-07 11:36:57 -06:00
parent 4bea69591b
commit a9942c9d6d
2 changed files with 30 additions and 1 deletions

View File

@ -2,6 +2,7 @@
import logging
import os
import platform
from abc import ABC, abstractmethod
from typing import Any
@ -13,6 +14,30 @@ from frigate.util.rknn_converter import auto_convert_model, is_rknn_compatible
logger = logging.getLogger(__name__)
def is_arm64_platform() -> bool:
    """Return True when the current machine reports an ARM architecture.

    NOTE(review): despite the "arm64" name, the match set also includes
    the 32-bit "armv7l" machine string, so any ARM variant is treated
    the same way.
    """
    arm_machine_names = {"aarch64", "arm64", "armv8", "armv7l"}
    return platform.machine().lower() in arm_machine_names
def get_ort_session_options() -> ort.SessionOptions | None:
    """Build ONNX Runtime session options appropriate for this platform.

    On ARM platforms, graph optimization is capped at ORT_ENABLE_BASIC to
    avoid graph fusion issues that can break certain models. On other
    platforms (e.g. amd64) None is returned, letting onnxruntime apply
    its default optimization level for better performance.
    """
    if not is_arm64_platform():
        return None

    options = ort.SessionOptions()
    options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
    return options
# Import OpenVINO only when needed to avoid circular dependencies
try:
import openvino as ov
@ -469,6 +494,7 @@ def get_optimized_runner(
return ONNXModelRunner(
ort.InferenceSession(
model_path,
sess_options=get_ort_session_options(),
providers=providers,
provider_options=options,
)

View File

@ -107,8 +107,11 @@ class Rknn(DetectionApi):
# Determine model type from config
model_type = self.detector_config.model.model_type
# Convert enum to string if needed
model_type_str = model_type.value if model_type else None
# Auto-convert the model
converted_path = auto_convert_model(model_path, model_type.value)
converted_path = auto_convert_model(model_path, model_type_str)
if converted_path:
model_props["path"] = converted_path