2025-05-29 18:02:17 +03:00
|
|
|
# In log.py
|
2024-09-17 16:26:25 +03:00
|
|
|
import atexit
|
2025-06-25 16:24:45 +03:00
|
|
|
import io
|
2020-11-04 06:26:39 +03:00
|
|
|
import logging
|
2020-12-04 15:59:03 +03:00
|
|
|
import os
|
2024-09-24 15:07:47 +03:00
|
|
|
import sys
|
2023-05-29 13:31:17 +03:00
|
|
|
import threading
|
|
|
|
|
from collections import deque
|
2025-06-25 16:24:45 +03:00
|
|
|
from contextlib import contextmanager
|
2025-06-13 17:43:38 +03:00
|
|
|
from enum import Enum
|
2025-06-25 16:24:45 +03:00
|
|
|
from functools import wraps
|
2024-09-17 16:26:25 +03:00
|
|
|
from logging.handlers import QueueHandler, QueueListener
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
from multiprocessing.managers import SyncManager
|
2025-06-25 16:24:45 +03:00
|
|
|
from queue import Empty, Queue
|
|
|
|
|
from typing import Any, Callable, Deque, Generator, Optional
|
2023-05-29 13:31:17 +03:00
|
|
|
|
2023-07-06 17:28:50 +03:00
|
|
|
from frigate.util.builtin import clean_camera_user_pass
|
2022-11-02 15:00:54 +03:00
|
|
|
|
2024-09-17 16:26:25 +03:00
|
|
|
# Shared stream handler used by the logging listener; every process's log
# records are ultimately emitted through this handler.
LOG_HANDLER = logging.StreamHandler()
LOG_HANDLER.setFormatter(
    logging.Formatter(
        "[%(asctime)s] %(name)-30s %(levelname)-8s: %(message)s",
        "%Y-%m-%d %H:%M:%S",
    )
)


def _drop_norfair_warning(record: logging.LogRecord) -> bool:
    # filter out norfair warning
    return not record.getMessage().startswith(
        "You are using a scalar distance function"
    )


def _drop_tflite_delegate_notice(record: logging.LogRecord) -> bool:
    # filter out tflite logging
    return (
        "Created TensorFlow Lite XNNPACK delegate for CPU."
        not in record.getMessage()
    )


LOG_HANDLER.addFilter(_drop_norfair_warning)
LOG_HANDLER.addFilter(_drop_tflite_delegate_notice)
|
|
|
|
|
|
2025-06-13 17:43:38 +03:00
|
|
|
|
|
|
|
|
class LogLevel(str, Enum):
    """String enum of the supported log level names.

    Values are the lowercase logging level names; apply_log_levels()
    upper-cases them before handing them to the stdlib logging module.
    """

    debug = "debug"
    info = "info"
    warning = "warning"
    error = "error"
    critical = "critical"
|
|
|
|
|
|
|
|
|
|
|
2024-09-27 15:53:23 +03:00
|
|
|
# Listener that drains log_queue into LOG_HANDLER; created by setup_logging()
# and stopped/cleared by _stop_logging().
log_listener: Optional[QueueListener] = None
# Manager-backed queue all QueueHandlers publish into; created by setup_logging().
log_queue: Optional[Queue] = None
|
2023-04-26 14:08:53 +03:00
|
|
|
|
2021-02-17 16:23:32 +03:00
|
|
|
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
def setup_logging(manager: SyncManager) -> None:
    """Install process-wide, queue-based logging.

    Creates a manager-backed log queue and a QueueListener that drains it
    into LOG_HANDLER, then reconfigures the root logger so every record is
    published through that queue.

    Args:
        manager: SyncManager used to allocate the shared log queue.
    """
    global log_listener, log_queue
    # NOTE: a manager queue (rather than a plain multiprocessing.Queue) works
    # around https://github.com/python/cpython/issues/91555, which could stop
    # log records from child processes from being printed.
    log_queue = manager.Queue()
    log_listener = QueueListener(log_queue, LOG_HANDLER, respect_handler_level=True)
    # Ensure the listener thread is stopped cleanly at interpreter exit.
    atexit.register(_stop_logging)
    log_listener.start()
    # Drop any pre-existing root handlers; records flow only through the queue.
    logging.basicConfig(
        level=logging.INFO,
        handlers=[],
        force=True,
    )
    logging.getLogger().addHandler(QueueHandler(log_listener.queue))
|
2023-02-04 05:15:47 +03:00
|
|
|
|
|
|
|
|
|
2024-09-27 15:53:23 +03:00
|
|
|
def _stop_logging() -> None:
    """Stop and clear the global log listener, if one is running.

    Registered via atexit by setup_logging(); safe to call more than once.
    """
    global log_listener

    if log_listener is None:
        return

    log_listener.stop()
    log_listener = None
|
2020-12-04 15:59:03 +03:00
|
|
|
|
2021-02-17 16:23:32 +03:00
|
|
|
|
2025-06-13 17:43:38 +03:00
|
|
|
def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
    """Set the root logger level, then apply per-logger overrides.

    Built-in defaults quiet known-noisy third-party libraries; entries in
    ``log_levels`` take precedence over those defaults.
    """
    logging.getLogger().setLevel(default)

    merged: dict[str, LogLevel] = {
        "absl": LogLevel.error,
        "httpx": LogLevel.error,
        "h5py": LogLevel.error,
        "keras": LogLevel.error,
        "matplotlib": LogLevel.error,
        "tensorflow": LogLevel.error,
        "tensorflow.python": LogLevel.error,
        "werkzeug": LogLevel.error,
        "ws4py": LogLevel.error,
        "PIL": LogLevel.warning,
        "numba": LogLevel.warning,
        "google_genai.models": LogLevel.warning,
    }
    merged.update(log_levels)

    for logger_name, level in merged.items():
        logging.getLogger(logger_name).setLevel(level.value.upper())
|
|
|
|
|
|
|
|
|
|
|
2024-09-24 15:07:47 +03:00
|
|
|
# When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the
# process is created after a thread (for example a logging thread) is created and the process fork
# happens while an internal lock is held, the stdout/err flush can cause a deadlock.
#
# https://github.com/python/cpython/issues/91776
def reopen_std_streams() -> None:
    """Rebuild sys.stdout/sys.stderr on top of fds 1 and 2 after a fork.

    Fresh file objects carry fresh internal locks/buffers, avoiding the
    inherited-lock deadlock described above.
    """
    sys.stdout = os.fdopen(1, "w")
    sys.stderr = os.fdopen(2, "w")


os.register_at_fork(after_in_child=reopen_std_streams)
|
|
|
|
|
|
|
|
|
|
|
2020-12-04 15:59:03 +03:00
|
|
|
# based on https://codereview.stackexchange.com/a/17959
class LogPipe(threading.Thread):
    """Thread that reads everything written to its pipe, sanitizes each line,
    and buffers the most recent lines for later emission via dump()."""

    def __init__(self, log_name: str, level: int = logging.ERROR):
        """Setup the object with a logger and start the thread"""
        super().__init__(daemon=False)
        self.logger = logging.getLogger(log_name)
        self.level = level
        # Keep only the 100 most recent lines.
        self.deque: Deque[str] = deque(maxlen=100)
        self.fdRead, self.fdWrite = os.pipe()
        self.pipeReader = os.fdopen(self.fdRead)
        self.start()

    def cleanup_log(self, log: str) -> str:
        """Cleanup the log line to remove sensitive info and string tokens."""
        return clean_camera_user_pass(log).strip("\n")

    def fileno(self) -> int:
        """Return the write file descriptor of the pipe"""
        return self.fdWrite

    def run(self) -> None:
        """Run the thread, logging everything."""
        for raw_line in iter(self.pipeReader.readline, ""):
            self.deque.append(self.cleanup_log(raw_line))

        self.pipeReader.close()

    def dump(self) -> None:
        """Emit and drain every buffered line through the logger."""
        while self.deque:
            self.logger.log(self.level, self.deque.popleft())

    def close(self) -> None:
        """Close the write end of the pipe."""
        os.close(self.fdWrite)
|
2025-06-25 16:24:45 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class LogRedirect(io.StringIO):
    """
    A file-like sink for capturing stdout/stderr and forwarding it to a logger.

    Extends io.StringIO so it can stand in for sys.stdout/sys.stderr; complete
    lines are logged immediately at a fixed level, while a trailing partial
    line stays buffered until a newline arrives or flush() is called.
    """

    def __init__(self, logger_instance: logging.Logger, level: int):
        super().__init__()
        self.logger = logger_instance
        self.log_level = level
        self._line_buffer: list[str] = []

    def write(self, s: Any) -> int:
        text = s if isinstance(s, str) else str(s)

        self._line_buffer.append(text)

        # Only attempt line splitting once a newline shows up.
        if "\n" in text:
            pending = "".join(self._line_buffer)
            self._line_buffer = []

            for chunk in pending.splitlines(keepends=True):
                if chunk.endswith("\n"):
                    self._process_line(chunk.rstrip("\n"))
                else:
                    # Trailing partial line: keep it buffered for later.
                    self._line_buffer.append(chunk)

        return len(text)

    def _process_line(self, line: str) -> None:
        self.logger.log(self.log_level, line)

    def flush(self) -> None:
        if not self._line_buffer:
            return

        remainder = "".join(self._line_buffer)
        self._line_buffer = []
        if remainder:  # Only process if there's content
            self._process_line(remainder)

    def __enter__(self) -> "LogRedirect":
        """Context manager entry point."""
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        """Context manager exit point. Ensures buffered content is flushed."""
        self.flush()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@contextmanager
def __redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]:
    """Redirect file descriptor 1 (stdout) to a pipe and capture output in a queue."""
    # Keep a duplicate of the real stdout so it can be restored afterwards.
    stdout_fd = os.dup(1)
    read_fd, write_fd = os.pipe()
    os.dup2(write_fd, 1)
    # fd 1 now refers to the pipe's write end; the extra descriptor for it
    # must be closed so the reader sees EOF once fd 1 is restored below.
    os.close(write_fd)

    stop_event = threading.Event()

    def reader() -> None:
        """Read from pipe and put lines in queue until stop_event is set."""
        try:
            with os.fdopen(read_fd, "r") as pipe:
                while not stop_event.is_set():
                    line = pipe.readline()
                    if not line:  # EOF
                        break
                    queue.put(line.strip())
        except OSError as e:
            queue.put(f"Reader error: {e}")
        finally:
            if not stop_event.is_set():
                stop_event.set()

    reader_thread = threading.Thread(target=reader, daemon=False)
    reader_thread.start()

    try:
        yield
    finally:
        # Restoring fd 1 closes the last write end of the pipe, which lets
        # the reader thread hit EOF and exit its loop.
        os.dup2(stdout_fd, 1)
        os.close(stdout_fd)
        stop_event.set()
        reader_thread.join(timeout=1.0)
        # The reader normally closes read_fd via its fdopen() context
        # manager; this close is a best-effort fallback if it did not run.
        try:
            os.close(read_fd)
        except OSError:
            pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def redirect_output_to_logger(logger: logging.Logger, level: int) -> Any:
    """Decorator to redirect both Python sys.stdout/stderr and C-level stdout to logger."""

    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            captured: Queue[str] = Queue()

            redirect = LogRedirect(logger, level)
            saved_stdout, saved_stderr = sys.stdout, sys.stderr
            sys.stdout = redirect
            sys.stderr = redirect

            try:
                # C-level writes to fd 1 are collected into `captured`.
                with __redirect_fd_to_queue(captured):
                    result = func(*args, **kwargs)
            finally:
                # Put the interpreter-level streams back first, then flush
                # any partial line still held by the redirector.
                sys.stdout = saved_stdout
                sys.stderr = saved_stderr
                redirect.flush()

                # Forward everything the fd-level reader collected.
                try:
                    while True:
                        logger.log(level, captured.get_nowait())
                except Empty:
                    pass

            return result

        return wrapper

    return decorator
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def suppress_os_output(func: Callable) -> Callable:
    """
    A decorator that suppresses all output (stdout and stderr)
    at the operating system file descriptor level for the decorated function.

    This is useful for silencing noisy C/C++ libraries.

    Note: This is a Unix-specific solution using os.dup2. It temporarily
    redirects file descriptors 1 (stdout) and 2 (stderr) to os.devnull,
    discarding their output.
    """

    @wraps(func)
    def wrapper(*args: tuple, **kwargs: dict[str, Any]) -> Any:
        # Save the original file descriptors for stdout (1) and stderr (2)
        original_stdout_fd = os.dup(1)
        original_stderr_fd = os.dup(2)

        # Discard output into /dev/null rather than an unread pipe: writing
        # to a pipe nobody reads blocks once the kernel pipe buffer
        # (typically 64 KiB) fills, which would deadlock the decorated
        # function. /dev/null accepts unlimited writes. This also matches
        # how suppress_stderr_during discards output.
        devnull_fd = os.open(os.devnull, os.O_WRONLY)

        try:
            # Redirect stdout (FD 1) and stderr (FD 2) to /dev/null
            os.dup2(devnull_fd, 1)
            os.dup2(devnull_fd, 2)

            # Execute the original function
            result = func(*args, **kwargs)

        finally:
            # Restore original stdout and stderr file descriptors (1 and 2).
            # This is crucial to ensure normal printing resumes after the
            # decorated function.
            os.dup2(original_stdout_fd, 1)
            os.dup2(original_stderr_fd, 2)

            # Close all duplicated file descriptors to prevent resource leaks.
            os.close(original_stdout_fd)
            os.close(original_stderr_fd)
            os.close(devnull_fd)

        return result

    return wrapper
|
2025-12-19 01:12:10 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
@contextmanager
def suppress_stderr_during(operation_name: str) -> Generator[None, None, None]:
    """
    Context manager that silences stderr for the duration of an operation.

    Useful for quieting LLVM debug output, CUDA messages, and other native
    library logging that cannot be controlled via Python logging or
    environment variables. File descriptor 2 (stderr) is redirected to
    /dev/null and restored on exit.

    Usage:
        with suppress_stderr_during("model_conversion"):
            converter = tf.lite.TFLiteConverter.from_keras_model(model)
            tflite_model = converter.convert()

    Args:
        operation_name: Name of the operation for debugging purposes
    """
    saved_stderr_fd = os.dup(2)
    sink_fd = os.open(os.devnull, os.O_WRONLY)
    try:
        os.dup2(sink_fd, 2)
        yield
    finally:
        os.dup2(saved_stderr_fd, 2)
        os.close(sink_fd)
        os.close(saved_stderr_fd)
|