mirror of https://github.com/blakeblackshear/frigate.git
suppress tensorflow logging during classification training
This commit is contained in:
parent ba4f4304ec
commit 886d9c4a55
@@ -237,8 +237,18 @@ ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
 # Set HailoRT to disable logging
 ENV HAILORT_LOGGER_PATH=NONE
 
-# TensorFlow error only
+# TensorFlow C++ logging suppression (must be set before import)
+# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
 ENV TF_CPP_MIN_LOG_LEVEL=3
+# Suppress verbose logging from TensorFlow C++ code
+ENV TF_CPP_MIN_VLOG_LEVEL=3
+# Disable oneDNN optimization messages ("optimized with oneDNN...")
+ENV TF_ENABLE_ONEDNN_OPTS=0
+# Suppress AutoGraph verbosity during conversion
+ENV AUTOGRAPH_VERBOSITY=0
+# Google Logging (GLOG) suppression for TensorFlow components
+ENV GLOG_minloglevel=3
+ENV GLOG_logtostderr=0
 
 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
 
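These variables are read by TensorFlow's C++ runtime when the library is first imported, which is why they are baked into the image as ENV rather than set from Python after startup. For reference, a minimal sketch of the same suppression done at the top of a script, using only the variables named above (the setdefault fallback pattern is illustrative, not part of this commit):

# Sketch: the Dockerfile ENV suppression replicated from Python.
# setdefault keeps any value already provided by the image ENV;
# the ordering (environ set before import) is the part that matters.
import os

os.environ.setdefault("TF_CPP_MIN_LOG_LEVEL", "3")   # C++ logs: errors only
os.environ.setdefault("TF_CPP_MIN_VLOG_LEVEL", "3")  # verbose VLOG messages
os.environ.setdefault("TF_ENABLE_ONEDNN_OPTS", "0")  # oneDNN messages/opts off
os.environ.setdefault("AUTOGRAPH_VERBOSITY", "0")    # AutoGraph chatter
os.environ.setdefault("GLOG_minloglevel", "3")       # glog-based TF components

import tensorflow as tf  # noqa: E402  (must come after the env setup)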
@@ -80,10 +80,15 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
     log_levels = {
         "absl": LogLevel.error,
         "httpx": LogLevel.error,
+        "h5py": LogLevel.error,
+        "keras": LogLevel.error,
         "matplotlib": LogLevel.error,
         "tensorflow": LogLevel.error,
+        "tensorflow.python": LogLevel.error,
         "werkzeug": LogLevel.error,
         "ws4py": LogLevel.error,
+        "PIL": LogLevel.warning,
+        "numba": LogLevel.warning,
         **log_levels,
     }
 
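The mapping pins noisy third-party loggers (h5py, keras, and the tensorflow.python child logger are the new entries) while **log_levels lets user-configured overrides win over the defaults. The body of apply_log_levels is not part of this diff; a minimal sketch of how such a name-to-level mapping is typically applied, with apply_levels as a hypothetical stand-in:

# Sketch (hypothetical stand-in, not the actual apply_log_levels body,
# which this diff does not show): push a name -> level mapping onto
# Python's logging hierarchy.
import logging

def apply_levels(default: str, levels: dict[str, str]) -> None:
    logging.getLogger().setLevel(default.upper())
    for name, level in levels.items():
        # Child loggers such as "tensorflow.python" inherit from their
        # parent unless set explicitly, which is why the diff pins both.
        logging.getLogger(name).setLevel(level.upper())

apply_levels("info", {"tensorflow": "error", "h5py": "error", "PIL": "warning"})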
@@ -318,3 +323,31 @@ def suppress_os_output(func: Callable) -> Callable:
         return result
 
     return wrapper
+
+
+@contextmanager
+def suppress_stderr_during(operation_name: str) -> Generator[None, None, None]:
+    """
+    Context manager to suppress stderr output during a specific operation.
+
+    Useful for silencing LLVM debug output, CUDA messages, and other native
+    library logging that cannot be controlled via Python logging or environment
+    variables. Completely redirects file descriptor 2 (stderr) to /dev/null.
+
+    Usage:
+        with suppress_stderr_during("model_conversion"):
+            converter = tf.lite.TFLiteConverter.from_keras_model(model)
+            tflite_model = converter.convert()
+
+    Args:
+        operation_name: Name of the operation for debugging purposes
+    """
+    original_stderr_fd = os.dup(2)
+    devnull = os.open(os.devnull, os.O_WRONLY)
+    try:
+        os.dup2(devnull, 2)
+        yield
+    finally:
+        os.dup2(original_stderr_fd, 2)
+        os.close(devnull)
+        os.close(original_stderr_fd)
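Because the helper swaps file descriptor 2 itself with os.dup2 rather than rebinding sys.stderr, it also silences native code (LLVM, CUDA, glog) that writes to fd 2 directly and never touches the Python object. A small self-contained demo of that property, assuming it runs inside the Frigate codebase so the import resolves (the messages are illustrative):

# Demo: fd-level redirection catches writes that sys.stderr patching misses.
import os
import sys

from frigate.log import suppress_stderr_during

os.write(2, b"visible: before\n")                    # reaches the terminal
with suppress_stderr_during("demo"):
    os.write(2, b"hidden: native-style fd write\n")  # lands in /dev/null
    print("hidden: Python stderr too", file=sys.stderr)  # same fd underneath
    sys.stderr.flush()                               # flush before fd restore
os.write(2, b"visible: after\n")                     # fd 2 restored in finally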
@@ -19,7 +19,7 @@ from frigate.const import (
     PROCESS_PRIORITY_LOW,
     UPDATE_MODEL_STATE,
 )
-from frigate.log import redirect_output_to_logger
+from frigate.log import redirect_output_to_logger, suppress_stderr_during
 from frigate.models import Event, Recordings, ReviewSegment
 from frigate.types import ModelStatusTypesEnum
 from frigate.util.downloader import ModelDownloader
@@ -250,15 +250,20 @@ class ClassificationTrainingProcess(FrigateProcess):
         logger.debug(f"Converting {self.model_name} to TFLite...")
 
         # convert model to tflite
-        converter = tf.lite.TFLiteConverter.from_keras_model(model)
-        converter.optimizations = [tf.lite.Optimize.DEFAULT]
-        converter.representative_dataset = (
-            self.__generate_representative_dataset_factory(dataset_dir)
-        )
-        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
-        converter.inference_input_type = tf.uint8
-        converter.inference_output_type = tf.uint8
-        tflite_model = converter.convert()
+        # Suppress stderr during conversion to avoid LLVM debug output
+        # (fully_quantize, inference_type, MLIR optimization messages, etc)
+        with suppress_stderr_during("tflite_conversion"):
+            converter = tf.lite.TFLiteConverter.from_keras_model(model)
+            converter.optimizations = [tf.lite.Optimize.DEFAULT]
+            converter.representative_dataset = (
+                self.__generate_representative_dataset_factory(dataset_dir)
+            )
+            converter.target_spec.supported_ops = [
+                tf.lite.OpsSet.TFLITE_BUILTINS_INT8
+            ]
+            converter.inference_input_type = tf.uint8
+            converter.inference_output_type = tf.uint8
+            tflite_model = converter.convert()
 
         # write model
         model_path = os.path.join(model_dir, "model.tflite")
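Full-integer quantization (TFLITE_BUILTINS_INT8 with uint8 input/output) needs a representative dataset so the converter can calibrate activation ranges. The factory itself is not shown in this diff; a minimal sketch of the shape such a generator usually takes, where the flat image directory, 224x224 RGB input size, [0,1] float preprocessing, and 100-sample calibration set are assumptions rather than Frigate's actual values:

# Sketch of a representative-dataset factory in the spirit of the
# __generate_representative_dataset_factory referenced above.
# Assumptions: flat directory of images, 224x224 RGB model input,
# float32 [0,1] preprocessing, ~100 calibration samples.
import os

import tensorflow as tf

def make_representative_dataset(dataset_dir: str, samples: int = 100):
    paths = sorted(
        os.path.join(dataset_dir, f) for f in os.listdir(dataset_dir)
    )[:samples]

    def representative_dataset():
        for path in paths:
            img = tf.io.decode_image(
                tf.io.read_file(path), channels=3, expand_animations=False
            )
            img = tf.image.resize(img, (224, 224))
            # The converter expects each yield to be a list of arrays,
            # one per model input, in the model's training dtype.
            yield [tf.cast(img, tf.float32)[tf.newaxis] / 255.0]

    return representative_dataset

Usage mirrors the diff: assign converter.representative_dataset = make_representative_dataset(dataset_dir) before calling converter.convert().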