Use model type to decide if model can use full optimization

This commit is contained in:
Nicolas Mowen 2025-09-18 13:59:34 -06:00
parent 161ed46c55
commit 62c1efc430
3 changed files with 9 additions and 4 deletions

View File

@@ -6,8 +6,6 @@ from abc import ABC, abstractmethod
from typing import Any
import numpy as np
from frigate.detectors.detector_types import ModelTypeEnum
from frigate.embeddings.types import EnrichmentModelTypeEnum
import onnxruntime as ort
from frigate.util.model import get_ort_providers
@@ -105,6 +103,10 @@ class CudaGraphRunner(BaseModelRunner):
@staticmethod
def is_complex_model(model_type: str) -> bool:
# Import here to avoid circular imports
from frigate.detectors.detector_config import ModelTypeEnum
from frigate.embeddings.types import EnrichmentModelTypeEnum
return model_type in [
ModelTypeEnum.yolonas.value,
EnrichmentModelTypeEnum.paddleocr.value,
@@ -169,6 +171,9 @@ class OpenVINOModelRunner(BaseModelRunner):
@staticmethod
def is_complex_model(model_type: str) -> bool:
    """Return True if *model_type* requires the complex (non-optimized) path.

    For the OpenVINO runner, only the PaddleOCR enrichment model is
    considered complex.
    """
    # Imported lazily to avoid a circular import at module load time.
    from frigate.embeddings.types import EnrichmentModelTypeEnum

    complex_types = {EnrichmentModelTypeEnum.paddleocr.value}
    return model_type in complex_types
def __init__(self, model_path: str, device: str, model_type: str, **kwargs):

View File

@@ -39,7 +39,7 @@ class ONNXDetector(DetectionApi):
self.runner = get_optimized_runner(
path,
detector_config.device,
complex_model=False,
model_type=detector_config.model.model_type,
)
self.onnx_model_type = detector_config.model.model_type

View File

@@ -45,7 +45,7 @@ class OvDetector(DetectionApi):
self.runner = OpenVINOModelRunner(
model_path=detector_config.model.path,
device=detector_config.device,
complex_model=False,
model_type=detector_config.model.model_type,
)
# For dfine models, also pre-allocate target sizes tensor