Move log level initialization to the log module

This commit is contained in:
Nicolas Mowen 2025-06-13 07:18:26 -06:00
parent 8485023442
commit 900ecb2a15
2 changed files with 30 additions and 24 deletions

View File

@ -1,20 +1,12 @@
import logging
from enum import Enum
from pydantic import Field, ValidationInfo, model_validator
from typing_extensions import Self
from frigate.log import LogLevel, apply_log_levels
from .base import FrigateBaseModel
__all__ = ["LoggerConfig", "LogLevel"]
# Enumeration of accepted log severities; inherits from str so pydantic can
# validate/serialize the values directly from config strings.
# NOTE(review): this is the pre-commit copy, removed from the config module
# and relocated to frigate/log.py by this change.
class LogLevel(str, Enum):
# Values are lowercase on purpose: they are upper-cased at the point where
# they are handed to logging.setLevel() (see post_validation below).
debug = "debug"
info = "info"
warning = "warning"
error = "error"
critical = "critical"
__all__ = ["LoggerConfig"]
class LoggerConfig(FrigateBaseModel):
@ -26,18 +18,6 @@ class LoggerConfig(FrigateBaseModel):
# Runs after field validation; only applies log levels when the validation
# context explicitly requests installation (context={"install": True}).
@model_validator(mode="after")
def post_validation(self, info: ValidationInfo) -> Self:
if isinstance(info.context, dict) and info.context.get("install", False):
# --- removed by this commit: inline level application ---------------
logging.getLogger().setLevel(self.default.value.upper())
# Chatty third-party loggers default to error unless the user
# overrides them via self.logs.
log_levels = {
"absl": LogLevel.error,
"httpx": LogLevel.error,
"tensorflow": LogLevel.error,
"werkzeug": LogLevel.error,
"ws4py": LogLevel.error,
**self.logs,
}
for log, level in log_levels.items():
logging.getLogger(log).setLevel(level.value.upper())
# --- added by this commit: delegate to frigate.log ------------------
apply_log_levels(self.default.value.upper(), self.logs)
return self

View File

@ -5,6 +5,7 @@ import os
import sys
import threading
from collections import deque
from enum import Enum
from logging.handlers import QueueHandler, QueueListener
from multiprocessing.managers import SyncManager
from queue import Queue
@ -33,6 +34,15 @@ LOG_HANDLER.addFilter(
not in record.getMessage()
)
# Enumeration of accepted log severities, now owned by frigate/log.py so
# both the config layer and the logging setup share one definition.
# Inherits from str so values compare/serialize as plain strings.
class LogLevel(str, Enum):
# Lowercase values; upper-cased when passed to logging.setLevel()
# (see apply_log_levels below).
debug = "debug"
info = "info"
warning = "warning"
error = "error"
critical = "critical"
# Module-level handles for the multiprocessing logging pipeline; None until
# logging is started, and reset to None again during teardown (the
# _stop_logging hunk below clears log_listener).
log_listener: Optional[QueueListener] = None
log_queue: Optional[Queue] = None
@ -61,6 +71,22 @@ def _stop_logging() -> None:
log_listener = None
def apply_log_levels(default: str, log_levels: "dict[str, LogLevel]") -> None:
    """Apply the root log level and per-logger overrides.

    Args:
        default: Level name for the root logger. Accepted case-insensitively;
            it is normalized to upper case because ``logging.setLevel`` only
            accepts upper-case level names.
        log_levels: User-configured per-logger overrides, merged on top of
            built-in defaults that quiet chatty third-party libraries.
    """
    logging.getLogger().setLevel(default.upper())

    # Noisy third-party loggers are forced to error unless the user
    # explicitly overrides them. Bind the merge to a new name instead of
    # shadowing the log_levels parameter.
    merged: dict[str, object] = {
        "absl": "error",
        "httpx": "error",
        "tensorflow": "error",
        "werkzeug": "error",
        "ws4py": "error",
        **log_levels,
    }
    for name, level in merged.items():
        # Overrides arrive as LogLevel enum members; defaults are plain
        # strings. Either way, setLevel wants an upper-case level name.
        value = level.value if isinstance(level, Enum) else level
        logging.getLogger(name).setLevel(value.upper())
# When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the
# process is created after a thread (for example a logging thread) is created and the process fork
# happens while an internal lock is held, the stdout/err flush can cause a deadlock.