otel-declarative 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- otel_declarative/Config/__init__.py +0 -0
- otel_declarative/Config/extraction_config.py +79 -0
- otel_declarative/Engines/Strategies/__init__.py +0 -0
- otel_declarative/Engines/Strategies/converter_strategies.py +120 -0
- otel_declarative/Engines/__init__.py +0 -0
- otel_declarative/Engines/converter_registry.py +140 -0
- otel_declarative/Engines/generic_extractor.py +96 -0
- otel_declarative/Engines/log_processors.py +191 -0
- otel_declarative/Engines/model_registry.py +90 -0
- otel_declarative/Engines/object_hydrator.py +126 -0
- otel_declarative/Engines/path_resolver.py +111 -0
- otel_declarative/Enums/__init__.py +0 -0
- otel_declarative/Enums/converter_types.py +42 -0
- otel_declarative/Enums/extraction_source.py +19 -0
- otel_declarative/Factories/__init__.py +0 -0
- otel_declarative/Factories/extractor_factory.py +156 -0
- otel_declarative/Infrastructure/__init__.py +0 -0
- otel_declarative/Infrastructure/async_log_engine.py +164 -0
- otel_declarative/Infrastructure/handlers.py +39 -0
- otel_declarative/Interfaces/__init__.py +0 -0
- otel_declarative/Interfaces/extractor.py +50 -0
- otel_declarative/Logging/__init__.py +0 -0
- otel_declarative/Logging/logger_factory.py +185 -0
- otel_declarative/Models/Log/__init__.py +0 -0
- otel_declarative/Models/Log/constants.py +47 -0
- otel_declarative/Models/Log/context.py +105 -0
- otel_declarative/Models/Log/mapping.py +94 -0
- otel_declarative/Models/Log/state.py +59 -0
- otel_declarative/Models/Log/topology.py +202 -0
- otel_declarative/Models/__init__.py +0 -0
- otel_declarative/Models/engine_states.py +135 -0
- otel_declarative/Models/mapping_models.py +111 -0
- otel_declarative/Models/summary_models.py +104 -0
- otel_declarative/Reporters/__init__.py +0 -0
- otel_declarative/Reporters/structured_reporter.py +154 -0
- otel_declarative/__init__.py +13 -0
- otel_declarative/constants.py +79 -0
- otel_declarative/provider.py +217 -0
- otel_declarative/settings.py +150 -0
- otel_declarative-0.1.2.dist-info/METADATA +72 -0
- otel_declarative-0.1.2.dist-info/RECORD +44 -0
- otel_declarative-0.1.2.dist-info/WHEEL +5 -0
- otel_declarative-0.1.2.dist-info/licenses/LICENSE +21 -0
- otel_declarative-0.1.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
import queue
|
|
2
|
+
import logging
|
|
3
|
+
import atexit
|
|
4
|
+
import threading
|
|
5
|
+
from logging.handlers import QueueHandler, QueueListener
|
|
6
|
+
from typing import List, Optional, Dict, ClassVar
|
|
7
|
+
from otel_declarative.Models.Log.context import StructuredLogSettings
|
|
8
|
+
from otel_declarative.Logging.logger_factory import LoggerFactory, LogConfig
|
|
9
|
+
from otel_declarative.Infrastructure.handlers import NonBlockingQueueHandler
|
|
10
|
+
|
|
11
|
+
class AsyncLogInfrastructure:
    """
    Asynchronous logging infrastructure manager.

    Responsibilities:
        1. Core queue ownership: initialises the thread-safe Queue that buffers
           high-frequency log events produced by business code.
        2. Listener lifecycle: starts and stops the background QueueListener
           thread, guaranteeing safe resource reclamation.
        3. Sync-sink bridging: uses LoggerFactory to build the physical output
           handlers and wraps them as the asynchronous consumer side.
    """

    def __init__(self, settings: StructuredLogSettings, base_log_config: LogConfig):
        """
        :param settings: global configuration model of the structured logging engine
        :param base_log_config: base logging configuration object
        """
        self._settings = settings
        self._base_config: LogConfig = base_log_config
        # Internal state
        self._queue: Optional[queue.Queue] = None
        self._listener: Optional[QueueListener] = None
        self._handler: Optional[QueueHandler] = None
        self._is_active: bool = False

    def build_async_handler(self) -> logging.Handler:
        """
        Build and start the asynchronous logging pipeline.

        Logic:
            1. Build the set of physical output handlers via LoggerFactory.
            2. Initialise the thread-safe message queue.
            3. Start the background QueueListener thread that performs the
               physical writes.
            4. Register an atexit hook so buffered records are flushed before
               the process exits.

        :return: configured QueueHandler instance, to be attached to a stdlib
                 logging node
        """
        # Fix: make the call idempotent. Previously a second invocation built a
        # fresh queue/listener, orphaning the running listener thread and
        # registering a duplicate atexit hook.
        if self._is_active and self._handler is not None:
            return self._handler

        # --- 1. Build the physical output layer ---
        sink_bridge_logger: logging.Logger = LoggerFactory.setup_logger(self._base_config)
        physical_handlers: List[logging.Handler] = sink_bridge_logger.handlers
        if not physical_handlers:
            # No output configured: return a NullHandler to keep logging safe.
            return logging.NullHandler()

        # --- 2. Initialise the async queue (depth driven by StructuredLogSettings) ---
        self._queue = queue.Queue(maxsize=self._settings.queue_size)

        # --- 3. Build and start the background listener ---
        # respect_handler_level=True makes each physical handler's level effective.
        self._listener = QueueListener(self._queue, *physical_handlers, respect_handler_level=True)
        self._listener.start()
        self._is_active = True

        # --- 4. Register process-exit cleanup ---
        atexit.register(self.shutdown)

        # --- 5. Build the front-end enqueueing handler ---
        self._handler = NonBlockingQueueHandler(self._queue)

        # [Fix 2026.01.15]
        return self._handler

    def shutdown(self) -> None:
        """
        Stop the background listener thread and ensure records still queued are
        dispatched.

        Fail-safe strategy:
            1. First attempt a normal stop(), which enqueues the sentinel.
            2. If the queue is full (queue.Full), back off:
               - drop the oldest record to free a slot;
               - retry sending the sentinel.
            3. Guarantee a graceful exit even under extreme backlog, never
               blocking the main process.
        """
        if not (self._listener and self._is_active):
            return

        # Listener thread was never started (or already joined): nothing to stop.
        if getattr(self._listener, "_thread", None) is None:
            self._is_active = False
            return

        try:
            self._listener.stop()
        except queue.Full:
            max_retries: int = 3
            for _ in range(max_retries):
                try:
                    # Drop the oldest record to make room for the sentinel.
                    self._queue.get_nowait()
                except queue.Empty:
                    pass

                try:
                    if getattr(self._listener, "_thread", None) is None:
                        break
                    self._listener.stop()
                    break
                except queue.Full:
                    # Still full (heavy concurrent producers): retry.
                    continue
        except AttributeError:
            # _thread became None concurrently; treat as already stopped.
            pass

        self._is_active = False

    @property
    def is_running(self) -> bool:
        """
        Whether the asynchronous infrastructure is currently active.
        """
        return self._is_active
|
|
121
|
+
|
|
122
|
+
class AsyncInfrastructureRegistry:
    """
    Registry of asynchronous logging infrastructure singletons.

    Responsibilities:
        1. Singleton control: guarantees at most one manager per LogConfig
           service name across the process.
        2. Fail-safe access: provides a safe accessor that encapsulates the
           initialisation complexity.
    """
    # Global singleton registry.
    # Fix: annotated as ClassVar for consistency with _lock — both are
    # class-level attributes, not per-instance fields.
    _managers: ClassVar[Dict[str, AsyncLogInfrastructure]] = {}
    # Thread synchronisation primitive: re-entrant lock
    _lock: ClassVar[threading.RLock] = threading.RLock()

    @classmethod
    def get_infrastructure(cls, settings: StructuredLogSettings, log_config: LogConfig) -> AsyncLogInfrastructure:
        """
        Get or create the async infrastructure manager for a configuration key.

        Logic (double-checked locking):
            1. Lock-free read for the common case.
            2. Compete for the lock only when the instance is missing.
            3. Re-check after acquiring the lock to avoid duplicate
               initialisation.
            4. Perform the side-effecting initialisation safely under the lock.

        :param settings: structured logging settings
        :param log_config: base logging configuration
        :return: the active, unique AsyncLogInfrastructure instance
        """
        # The service name is used as the registry index key.
        registry_key: str = log_config.service_name

        if registry_key in cls._managers:
            return cls._managers[registry_key]

        with cls._lock:
            if registry_key not in cls._managers:
                cls._managers[registry_key] = AsyncLogInfrastructure(
                    settings=settings,
                    base_log_config=log_config,
                )

        return cls._managers[registry_key]
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import queue
|
|
2
|
+
import logging
|
|
3
|
+
from typing import cast
|
|
4
|
+
from logging.handlers import QueueHandler
|
|
5
|
+
|
|
6
|
+
class NonBlockingQueueHandler(QueueHandler):
    """
    Non-blocking asynchronous queue handler.

    Responsibilities:
        1. Extends the stdlib QueueHandler and overrides the core enqueue step.
        2. Implements a "drop the oldest record when the queue is full" policy
           so that fresh records take priority.
        3. Never lets queue.Full propagate, keeping stderr clean and business
           threads free of blocking.
    """
    def enqueue(self, record: logging.LogRecord) -> None:
        """
        Perform a non-blocking enqueue.

        Logic:
            1. Try an immediate put.
            2. On a full queue:
               a. pop one element from the head to free a slot;
               b. retry putting the new record.
            3. If the second put still fails under extreme contention, silently
               drop the current record without raising.

        :param record: the log record to process
        """
        try:
            self.queue.put_nowait(record)
            return
        except queue.Full:
            pass

        # Make room by discarding the oldest entry, then retry exactly once.
        try:
            cast(queue.Queue, self.queue).get_nowait()
        except (queue.Empty, AttributeError):
            pass

        try:
            self.queue.put_nowait(record)
        except queue.Full:
            # Lost the race again: drop this record silently.
            pass
|
|
File without changes
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from typing import Any, Protocol, Type, Optional, runtime_checkable
|
|
2
|
+
from otel_declarative.Models.summary_models import InputSummary, OutputSummary
|
|
3
|
+
|
|
4
|
+
@runtime_checkable
class IExtractor(Protocol):
    """
    Metadata extractor interface protocol.

    Defines the standard behaviour for extracting normalised observability
    summaries from raw business payloads.
    """
    def extract_input(self, args: Any, kwargs: Any) -> InputSummary:
        """
        Extract a normalised input summary from the decorated function's raw
        call arguments.

        Responsibilities:
            1. Locate and parse the business payload inside args / kwargs.
            2. Perform defensive argument validation so extraction never
               disturbs the main business flow.
            3. Build and return a strongly typed InputSummary object.

        :param args: positional-argument tuple of the call
        :param kwargs: keyword-argument dict of the call
        :return: populated normalised input summary model instance
        """
        ...

    def extract_output(self, result: Any) -> OutputSummary:
        """
        Extract a normalised output summary from the decorated function's
        return value.

        Responsibilities:
            1. Parse the business return value.
            2. Extract key identifiers for cross-system tracing.
            3. Summarise the logical outcome of the business execution.

        :param result: the return value of the decorated function
        :return: populated normalised output summary model instance
        """
        ...

    def supports(self, layer: str, payload_type: Optional[Type[Any]] = None) -> bool:
        """
        Strategy self-discovery flag: decide whether this extractor can handle
        the given logical layer or payload type.

        Used by ExtractorFactory to pick a suitable strategy class at runtime
        based on context.

        :param layer: logical layer identifier to check
        :param payload_type: optional concrete payload type to check
        :return: True when this extractor supports the given combination
        """
        ...
|
|
File without changes
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import logging
|
|
3
|
+
import sys
|
|
4
|
+
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
|
|
5
|
+
from enum import Enum
|
|
6
|
+
from typing import Optional
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
|
|
9
|
+
class LogLevel(Enum):
    """
    Log level enumeration.
    """
    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"

    def to_logging_level(self) -> int:
        """
        Map this enum member to the stdlib numeric logging level.

        Fix: the previous implementation relied on ``logging.getLevelName()``'s
        discouraged name->number reverse lookup (whose return type is
        ``str | int``). Every standard level name is also a module attribute of
        ``logging``, so look it up directly and always return an int.
        """
        return getattr(logging, self.value)
|
|
21
|
+
|
|
22
|
+
class RotationType(str, Enum):
    """
    Log rotation strategy enumeration.
    """
    # No file output
    NONE = "none"
    # Rotate by file size
    SIZE = "size"
    # Rotate by time interval
    TIME = "time"
+
|
|
33
|
+
@dataclass
class LogConfig:
    """
    Logging system configuration.
    """
    # --- Basic identity ---
    service_name: str

    # --- Output control ---
    level: LogLevel = LogLevel.INFO
    # Whether to write to standard output
    enable_console: bool = True
    # Whether to write to a file
    enable_file: bool = True

    # --- File path settings (effective only when enable_file = True) ---
    log_dir: str = "/var/log/app"
    file_name: Optional[str] = None

    # --- Rotation strategy ---
    rotation_type: RotationType = RotationType.NONE

    # Size-strategy parameters
    max_bytes: int = 10 * 1024 * 1024
    backup_count: int = 5

    # Time-strategy parameters
    # S: Seconds, M: Minutes, H: Hours, D: Days, midnight: roll over at midnight
    when: str = "midnight"
    interval: int = 1
    # Whether rollover times are computed in UTC
    utc: bool = False

    # --- Format settings ---
    # Default format string; may be overridden from a configuration file
    format_string: str = (
        "%(asctime)s | %(levelname)-8s | %(name)s | "
        "thread:%(thread)d | %(filename)s:%(lineno)d | %(message)s"
    )

    def get_log_file_path(self) -> str:
        """
        Compute the full absolute path of the log file.
        """
        fname = self.file_name or f"{self.service_name}.log"
        return os.path.join(self.log_dir, fname)
|
|
79
|
+
|
|
80
|
+
class LoggerFactory:
|
|
81
|
+
"""
|
|
82
|
+
日志工厂类
|
|
83
|
+
负责根据传入的配置对象构建和组装标准化的 Logger 实例
|
|
84
|
+
"""
|
|
85
|
+
@staticmethod
|
|
86
|
+
def setup_logger(config: LogConfig) -> logging.Logger:
|
|
87
|
+
"""
|
|
88
|
+
根据配置文件初始化并获得 Logger 实例
|
|
89
|
+
|
|
90
|
+
:param config: 完整的日志配置对象
|
|
91
|
+
:return: 配置完成的 Logger
|
|
92
|
+
"""
|
|
93
|
+
# --- 1、获取 Logger 对象 ---
|
|
94
|
+
logger = logging.getLogger(config.service_name)
|
|
95
|
+
logger.setLevel(config.level.to_logging_level())
|
|
96
|
+
|
|
97
|
+
# 屏蔽第三方库的底层详细日志
|
|
98
|
+
silenced_loggers = [
|
|
99
|
+
"httpcore", "httpx", "hpack", "anyio",
|
|
100
|
+
"httpcore._trace", "httpcore._client"
|
|
101
|
+
]
|
|
102
|
+
for logger_name in silenced_loggers:
|
|
103
|
+
logging.getLogger(logger_name).setLevel(logging.WARNING)
|
|
104
|
+
|
|
105
|
+
# 清除已有的处理器
|
|
106
|
+
if logger.handlers:
|
|
107
|
+
logger.handlers.clear()
|
|
108
|
+
|
|
109
|
+
# 防止日志传播到根记录器导致双重打印
|
|
110
|
+
logger.propagate = False
|
|
111
|
+
|
|
112
|
+
# --- 2、创建统一格式化器 ---
|
|
113
|
+
formatter = logging.Formatter(config.format_string)
|
|
114
|
+
|
|
115
|
+
# --- 3、配置控制太处理器 ---
|
|
116
|
+
if config.enable_console:
|
|
117
|
+
console_handler = logging.StreamHandler(sys.stdout)
|
|
118
|
+
console_handler.setFormatter(formatter)
|
|
119
|
+
logger.addHandler(console_handler)
|
|
120
|
+
|
|
121
|
+
# --- 4、配置从文件处理器 ---
|
|
122
|
+
if config.enable_file and config.rotation_type != RotationType.NONE:
|
|
123
|
+
file_handler = LoggerFactory._create_file_handler(config)
|
|
124
|
+
if file_handler:
|
|
125
|
+
file_handler.setFormatter(formatter)
|
|
126
|
+
logger.addHandler(file_handler)
|
|
127
|
+
|
|
128
|
+
return logger
|
|
129
|
+
|
|
130
|
+
@staticmethod
|
|
131
|
+
def _create_file_handler(config: LogConfig) -> Optional[logging.Handler]:
|
|
132
|
+
"""
|
|
133
|
+
根据 RotationType 创建相应的文件处理器
|
|
134
|
+
|
|
135
|
+
:param config: 日志系统配置对象
|
|
136
|
+
"""
|
|
137
|
+
# --- 1、前置检查: 确保目录存在 ---
|
|
138
|
+
if not os.path.exists(config.log_dir):
|
|
139
|
+
try:
|
|
140
|
+
os.makedirs(config.log_dir, exist_ok=True)
|
|
141
|
+
except OSError as e:
|
|
142
|
+
sys.stderr.write(f"CRITICAL: Logger 创建日志目录 {config.log_dir} 失败: {e}\n")
|
|
143
|
+
return None
|
|
144
|
+
|
|
145
|
+
file_path = config.get_log_file_path()
|
|
146
|
+
|
|
147
|
+
try:
|
|
148
|
+
handler: logging.Handler
|
|
149
|
+
|
|
150
|
+
if config.rotation_type == RotationType.SIZE:
|
|
151
|
+
# 按大小切割
|
|
152
|
+
handler = RotatingFileHandler(
|
|
153
|
+
filename=file_path,
|
|
154
|
+
mode="a",
|
|
155
|
+
maxBytes=config.max_bytes,
|
|
156
|
+
backupCount=config.backup_count,
|
|
157
|
+
encoding="utf-8",
|
|
158
|
+
delay=True # 延迟打开文件, 直到第一条日志写入
|
|
159
|
+
)
|
|
160
|
+
elif config.rotation_type == RotationType.TIME:
|
|
161
|
+
# 按时间切割
|
|
162
|
+
handler = TimedRotatingFileHandler(
|
|
163
|
+
filename=file_path,
|
|
164
|
+
when=config.when,
|
|
165
|
+
interval=config.interval,
|
|
166
|
+
backupCount=config.backup_count,
|
|
167
|
+
encoding="utf-8",
|
|
168
|
+
utc=config.utc,
|
|
169
|
+
delay=True
|
|
170
|
+
)
|
|
171
|
+
else:
|
|
172
|
+
return None
|
|
173
|
+
except Exception as e:
|
|
174
|
+
sys.stderr.write(f"CRICTL: 初始化文件 {file_path} 的文件句柄失败: {e}\n")
|
|
175
|
+
return None
|
|
176
|
+
|
|
177
|
+
def get_child_logger(parent_logger_name: str, child_suffix: str) -> logging.Logger:
    """
    Helper for obtaining a sub-module Logger.

    :param parent_logger_name: parent logger name (e.g. ``backfill``)
    :param child_suffix: sub-module suffix (e.g. ``controllers.job``)
    :return: a Logger named e.g. ``backfill.controllers.job``
    """
    qualified_name = f"{parent_logger_name}.{child_suffix}"
    return logging.getLogger(qualified_name)
|
|
File without changes
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from enum import unique, Enum
|
|
3
|
+
from typing import runtime_checkable, Protocol, Any, Dict
|
|
4
|
+
|
|
5
|
+
# --- 基础契约模块 ---
|
|
6
|
+
|
|
7
|
+
@dataclass(frozen=True)
class FieldContract:
    """
    Observability-protocol metadata attached to a field.
    """
    # Original key name in the source event dict (presumably consumed by the
    # field-renaming engine — confirm against LogFieldRenamer)
    source_key: str = ""
    # Whether this field's output key may be remapped
    is_renamable: bool = False
+
|
|
15
|
+
|
|
16
|
+
@unique
class LogFormat(str, Enum):
    """
    Structured-log output format enumeration.

    Responsibilities:
        1. Defines the strategy identifiers for renderers.
        2. Distinguishes the human-readable console mode from the
           machine-oriented JSON mode.
    """
    # Suited to development: coloured, indented output
    CONSOLE = "console"
    # Suited to production: standard JSON-lines format
    JSON = "json"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@runtime_checkable
class ILogProcessor(Protocol):
    """
    Structured log processor interface protocol.

    Purpose: defines the call-signature contract every standard structlog
    middleware must follow.
    """
    def __call__(self, logger: Any, method_name: str, event_dict: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute the log-event processing logic.

        :param logger: the currently bound logger instance (BindableLogger, stdlib Logger, ...)
        :param method_name: name of the logging method that fired the event
        :param event_dict: the log-data dict accumulated in the current context
        :return: the processed / enriched event dict
        """
        ...
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
from typing import Optional, Dict, Any
|
|
2
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
3
|
+
from otel_declarative.Models.Log.constants import LogFormat
|
|
4
|
+
from otel_declarative.Models.Log.mapping import LogFieldMapping
|
|
5
|
+
|
|
6
|
+
# --- 上下文与设置模块 ---
|
|
7
|
+
|
|
8
|
+
class LogContext(BaseModel):
    """
    Mandatory structured-log context model.

    Responsibilities:
        1. Defines the "full-chain tracing" core fields every log line must carry.
        2. Acts as the strongly typed contract for data injected by
           OtelTraceContextProcessor.
    """
    model_config = ConfigDict(
        frozen=True,
        populate_by_name=True,
    )

    # OpenTelemetry global trace identifier
    trace_id: Optional[str] = Field(
        default=None,
        alias="trace_id",
        description="OpenTelemetry 全局追踪标识符",
    )

    # OpenTelemetry current span identifier
    span_id: Optional[str] = Field(
        default=None,
        alias="span_id",
        description="OpenTelemetry 当前跨度标识符"
    )

    # Unique name of the service / application emitting the log
    service_name: str = Field(
        ...,
        alias="service",
        description="产生日志的服务 / 应用的唯一名称"
    )

    # Deployment environment tag (e.g. development, production)
    environment: str = Field(
        default="production",
        description="部署环境标识 (例如: development, production)"
    )

    # Logical node or Pod name
    node_name: Optional[str] = Field(
        default=None,
        description="逻辑节点或 Pod 名称"
    )
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class StructuredLogSettings(BaseModel):
    """
    Global settings model for the structured logging engine.

    Responsibilities:
        1. Encapsulates all parameters affecting structlog initialisation and
           runtime behaviour.
        2. Decouples logging logic from the underlying I/O infrastructure.
    """
    model_config = ConfigDict(
        frozen=True,
        extra="ignore"
    )

    # --- Rendering control ---
    log_format: LogFormat = Field(
        default=LogFormat.JSON,
        description="日志渲染格式策略选择"
    )

    # --- Async performance control ---
    enable_async: bool = Field(
        default=True,
        description="是否启用基于 QueueHandler 的异步写入引擎"
    )

    queue_size: int = Field(
        default=1000,
        ge=100,
        description="异步日志缓冲区队列容量上限"
    )

    # --- Context-injection control ---
    inject_otel_context: bool = Field(
        default=True,
        description="是否自动调用处理器注入 TraceID 与 SpanID"
    )

    # --- Field-mapping control ---
    # Allows remapping structlog's default field names without code changes,
    # e.g. mapping 'event' to 'message' to fit a particular ELK index.
    field_mapping: LogFieldMapping = Field(
        default_factory=LogFieldMapping,
        description="结构化字段名重映射配置对象, 支持嵌套校验"
    )

    def to_structlog_processors_config(self) -> Dict[str, Any]:
        """
        Convert this model into the dict snapshot required by the structlog
        configuration.

        :return: dict containing the key switches
        """
        return {
            "format": self.log_format.value,
            "async_enabled": self.enable_async,
            "mapping": self.field_mapping
        }
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
from typing import Annotated, Dict, Any, Set
|
|
2
|
+
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
|
3
|
+
from otel_declarative.Models.Log.constants import FieldContract
|
|
4
|
+
|
|
5
|
+
# --- 映射规则模块 ---
|
|
6
|
+
|
|
7
|
+
class LogFieldMapping(BaseModel):
    """
    Remapping model for the standard log fields.

    Responsibilities:
        1. Contract definition: uses the Annotated syntax to atomically bind
           the business configuration to the underlying FieldContract
           observability contract.
        2. Static validation: enforces uniqueness of output key names to
           prevent log data from being overwritten.
        3. Engine navigation: provides the precomputation basis for
           LogFieldRenamer's migration matrix.
    """
    model_config = ConfigDict(
        frozen=True,
        populate_by_name=True,
    )

    # --- Basic fields ---
    event: Annotated[
        str,
        Field(default="event", description="业务事件消息的输出键名"),
        FieldContract(source_key="event", is_renamable=True),
    ]

    timestamp: Annotated[
        str,
        Field(default="@timestamp", description="日志产生时间的输出键名"),
        FieldContract(source_key="timestamp", is_renamable=True),
    ]

    level: Annotated[
        str,
        Field(default="level", description="日志级别的输出键名"),
        FieldContract(source_key="level", is_renamable=True),
    ]

    logger: Annotated[
        str,
        Field(default="logger", description="日志记录器名称的输出键名"),
        FieldContract(source_key="logger", is_renamable=True)
    ]

    # --- OTel tracing fields ---
    trace_id: Annotated[
        str,
        Field(default="trace_id", description="注入的 OTel TraceID 输出键名"),
        FieldContract(is_renamable=False)
    ]

    span_id: Annotated[
        str,
        Field(default="span_id", description="注入的 OTel SpanID 输出键名"),
        FieldContract(is_renamable=False)
    ]

    # --- Service metadata fields ---
    service: Annotated[str,
        Field(default="service", description="服务唯一名称的输出键名"),
        FieldContract(is_renamable=False)
    ]

    environment: Annotated[str,
        Field(default="environment", description="运行环境标识的输出键名"),
        FieldContract(is_renamable=False)
    ]

    pod_name: Annotated[str,
        Field(default="pod_name", description="K8S Pod/节点名称的输出键名"),
        FieldContract(is_renamable=False)
    ]

    @model_validator(mode="after")
    def validate_no_target_key_collision(self) -> "LogFieldMapping":
        """
        Post-validation: ensure every target key name is unique in the log
        schema.

        Purpose: prevents a user from accidentally mapping two different
        fields to the same key in YAML configuration, which would overwrite
        log data.

        :return: the validated instance
        :raises: ValueError - raised when a key-name collision is detected
        """
        target_keys: Dict[str, Any] = self.model_dump()
        seen_keys: Set[str] = set()
        for attr_name, target_key in target_keys.items():
            if target_key in seen_keys:
                raise ValueError(
                    f"日志字段映射冲突: 多个配置项指向了同一个目标键名 '{target_key}'"
                    f"请检查模型属性 '{attr_name}' 的配置"
                )
            seen_keys.add(target_key)
        return self
|