Compare commits

..

2 Commits

Author SHA1 Message Date
Nicolas Mowen
29ca18c24c
Add warm-up to onnx as some GPUs require kernel compilation before accepting inferences (#22685)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions
2026-03-29 11:19:46 -05:00
GuoQing Liu
148e11afc5
fix: fix classification none label i18n issue (#22680) 2026-03-29 05:51:29 -06:00
2 changed files with 31 additions and 1 deletions

View File

@@ -8,6 +8,8 @@ from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detection_runners import get_optimized_runner
 from frigate.detectors.detector_config import (
     BaseDetectorConfig,
+    InputDTypeEnum,
+    InputTensorEnum,
     ModelTypeEnum,
 )
 from frigate.util.model import (
@@ -59,8 +61,34 @@ class ONNXDetector(DetectionApi):
         if self.onnx_model_type == ModelTypeEnum.yolox:
             self.calculate_grids_strides()

+        self._warmup(detector_config)
+
         logger.info(f"ONNX: {path} loaded")
def _warmup(self, detector_config: ONNXDetectorConfig) -> None:
"""Run a warmup inference to front-load one-time compilation costs.
Some GPU backends have a slow first inference: CUDA may need PTX JIT
compilation on newer architectures (e.g. NVIDIA 50-series / Blackwell),
and MIGraphX compiles the model graph on first run. Running it here
(during detector creation) keeps the watchdog start_time at 0.0 so the
process won't be killed.
"""
if detector_config.model.input_tensor == InputTensorEnum.nchw:
shape = (1, 3, detector_config.model.height, detector_config.model.width)
else:
shape = (1, detector_config.model.height, detector_config.model.width, 3)
if detector_config.model.input_dtype in (
InputDTypeEnum.float,
InputDTypeEnum.float_denorm,
):
dtype = np.float32
else:
dtype = np.uint8
logger.info("ONNX: warming up detector (may take a while on first run)...")
self.detect_raw(np.zeros(shape, dtype=dtype))
     def detect_raw(self, tensor_input: np.ndarray):
         if self.onnx_model_type == ModelTypeEnum.dfine:
             tensor_output = self.runner.run(

View File

@@ -601,7 +601,9 @@ function LibrarySelector({
   const [confirmDelete, setConfirmDelete] = useState<string | null>(null);
   const [renameClass, setRenameClass] = useState<string | null>(null);

   const pageTitle = useMemo(() => {
-    if (pageToggle != "train") {
+    if (pageToggle == "none") {
+      return t("details.none");
+    } else if (pageToggle != "train") {
       return pageToggle;
     }