mirror of
https://github.com/blakeblackshear/frigate.git
synced 2025-12-06 05:24:11 +03:00
RKNN Fixes (#20380)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions
* Fix arm64 unable to optimize onnx
* Move to onnx format for rknn
This commit is contained in:
parent 37afd5da6b
commit 33f0c23389
@@ -2,6 +2,7 @@
 
 import logging
 import os
+import platform
 from abc import ABC, abstractmethod
 from typing import Any
@@ -13,6 +14,30 @@ from frigate.util.rknn_converter import auto_convert_model, is_rknn_compatible
 
 logger = logging.getLogger(__name__)
 
+
+def is_arm64_platform() -> bool:
+    """Check if we're running on an ARM platform."""
+    machine = platform.machine().lower()
+    return machine in ("aarch64", "arm64", "armv8", "armv7l")
+
+
+def get_ort_session_options() -> ort.SessionOptions | None:
+    """Get ONNX Runtime session options with appropriate settings.
+
+    On ARM/RKNN platforms, use basic optimizations to avoid graph fusion issues
+    that can break certain models. On amd64, use default optimizations for better performance.
+    """
+    sess_options = None
+
+    if is_arm64_platform():
+        sess_options = ort.SessionOptions()
+        sess_options.graph_optimization_level = (
+            ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
+        )
+
+    return sess_options
+
+
 # Import OpenVINO only when needed to avoid circular dependencies
 try:
     import openvino as ov
@@ -469,6 +494,7 @@ def get_optimized_runner(
     return ONNXModelRunner(
         ort.InferenceSession(
             model_path,
+            sess_options=get_ort_session_options(),
             providers=providers,
             provider_options=options,
         )
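For context, a minimal standalone sketch of how the new options reach ONNX Runtime. The function name `make_session_options` simply mirrors the helper added above, and the model path and provider list are placeholders, not values from the commit:

import platform

import onnxruntime as ort


def make_session_options() -> ort.SessionOptions | None:
    # Mirrors the helper added in this commit: basic graph optimizations on ARM,
    # ONNX Runtime's own default (ORT_ENABLE_ALL) everywhere else.
    if platform.machine().lower() in ("aarch64", "arm64", "armv8", "armv7l"):
        opts = ort.SessionOptions()
        opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
        return opts
    return None


session = ort.InferenceSession(
    "model.onnx",                         # illustrative path
    sess_options=make_session_options(),  # None lets ORT fall back to its defaults
    providers=["CPUExecutionProvider"],   # illustrative provider list
)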
@@ -107,8 +107,11 @@ class Rknn(DetectionApi):
         # Determine model type from config
         model_type = self.detector_config.model.model_type
 
+        # Convert enum to string if needed
+        model_type_str = model_type.value if model_type else None
+
         # Auto-convert the model
-        converted_path = auto_convert_model(model_path, model_type.value)
+        converted_path = auto_convert_model(model_path, model_type_str)
 
         if converted_path:
             model_props["path"] = converted_path
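The new intermediate variable guards against configurations where no model type is set; accessing `.value` on `None` would raise. A minimal sketch of the pattern, using a stand-in enum rather than Frigate's actual config enum:

from enum import Enum


class ModelTypeEnum(Enum):  # stand-in for the detector config's model type enum
    yolo_generic = "yolo-generic"


def to_model_type_str(model_type: ModelTypeEnum | None) -> str | None:
    # model_type may be None when the config does not specify a model type;
    # unconditionally accessing .value would raise AttributeError.
    return model_type.value if model_type else None


assert to_model_type_str(ModelTypeEnum.yolo_generic) == "yolo-generic"
assert to_model_type_str(None) is None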
@@ -14,7 +14,7 @@ logger = logging.getLogger(__name__)
 MODEL_TYPE_CONFIGS = {
     "yolo-generic": {
         "mean_values": [[0, 0, 0]],
-        "std_values": [[255, 255, 255]],
+        "std_values": [[1, 1, 1]],
         "target_platform": None,  # Will be set dynamically
     },
     "yolonas": {
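Assuming the RKNN toolkit's documented per-channel normalization of (x - mean) / std, the new std_values of [[1, 1, 1]] pass raw 0-255 pixels through unchanged, while the old [[255, 255, 255]] rescaled them to 0-1. A quick arithmetic check with illustrative values:

import numpy as np

pixel = np.array([0, 128, 255], dtype=np.float32)

old = (pixel - 0) / 255.0  # std_values [[255, 255, 255]]: scaled to 0..1
new = (pixel - 0) / 1.0    # std_values [[1, 1, 1]]: 0..255 passed through

print(old)  # [0.        0.5019608 1.       ]
print(new)  # [  0. 128. 255.]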
@@ -179,6 +179,22 @@ def convert_onnx_to_rknn(
     config = MODEL_TYPE_CONFIGS[model_type].copy()
     config["target_platform"] = soc
 
+    # RKNN toolkit requires .onnx extension, create temporary copy if needed
+    temp_onnx_path = None
+    onnx_model_path = onnx_path
+
+    if not onnx_path.endswith(".onnx"):
+        import shutil
+
+        temp_onnx_path = f"{onnx_path}.onnx"
+        logger.debug(f"Creating temporary ONNX copy: {temp_onnx_path}")
+        try:
+            shutil.copy2(onnx_path, temp_onnx_path)
+            onnx_model_path = temp_onnx_path
+        except Exception as e:
+            logger.error(f"Failed to create temporary ONNX copy: {e}")
+            return False
+
     try:
         from rknn.api import RKNN  # type: ignore
 
@@ -188,18 +204,18 @@ def convert_onnx_to_rknn(
 
         if model_type == "jina-clip-v1-vision":
             load_output = rknn.load_onnx(
-                model=onnx_path,
+                model=onnx_model_path,
                 inputs=["pixel_values"],
                 input_size_list=[[1, 3, 224, 224]],
             )
         elif model_type == "arcface-r100":
             load_output = rknn.load_onnx(
-                model=onnx_path,
+                model=onnx_model_path,
                 inputs=["data"],
                 input_size_list=[[1, 3, 112, 112]],
            )
         else:
-            load_output = rknn.load_onnx(model=onnx_path)
+            load_output = rknn.load_onnx(model=onnx_model_path)
 
         if load_output != 0:
             logger.error("Failed to load ONNX model")
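For readers unfamiliar with the toolkit, a rough sketch of the call sequence that surrounds these load_onnx calls, assuming rknn-toolkit2's documented API; the paths, SoC, and config values are illustrative and error handling is condensed. The diff only changes which path is handed to load_onnx:

from rknn.api import RKNN  # rknn-toolkit2

rknn = RKNN(verbose=False)
rknn.config(
    mean_values=[[0, 0, 0]],
    std_values=[[1, 1, 1]],
    target_platform="rk3588",  # illustrative SoC
)
if rknn.load_onnx(model="/tmp/model.onnx") != 0:  # path must end in .onnx
    raise RuntimeError("load_onnx failed")
if rknn.build(do_quantization=False) != 0:
    raise RuntimeError("build failed")
rknn.export_rknn("/tmp/model.rknn")
rknn.release()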
@@ -219,6 +235,14 @@ def convert_onnx_to_rknn(
     except Exception as e:
         logger.error(f"Error during RKNN conversion: {e}")
         return False
+    finally:
+        # Clean up temporary file if created
+        if temp_onnx_path and os.path.exists(temp_onnx_path):
+            try:
+                os.remove(temp_onnx_path)
+                logger.debug(f"Removed temporary ONNX file: {temp_onnx_path}")
+            except Exception as e:
+                logger.warning(f"Failed to remove temporary ONNX file: {e}")
 
 
 def cleanup_stale_lock(lock_file_path: Path) -> bool:
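The copy-and-cleanup added across the two hunks above could also be expressed as a context manager; a hypothetical sketch (not what the commit does), included only to illustrate the design trade-off:

import os
import shutil
from contextlib import contextmanager


@contextmanager
def ensure_onnx_extension(path: str):
    """Yield a path ending in .onnx, copying to a temporary file if needed."""
    if path.endswith(".onnx"):
        yield path
        return
    temp_path = f"{path}.onnx"
    shutil.copy2(path, temp_path)
    try:
        yield temp_path
    finally:
        if os.path.exists(temp_path):
            os.remove(temp_path)

The inline approach the commit takes keeps the failure path explicit (logging and returning False from convert_onnx_to_rknn), which arguably fits the surrounding function better than hiding the copy behind a helper.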