sycommon-python-lib 0.1.46__py3-none-any.whl → 0.1.57b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. sycommon/config/Config.py +29 -4
  2. sycommon/config/LangfuseConfig.py +15 -0
  3. sycommon/config/RerankerConfig.py +1 -0
  4. sycommon/config/SentryConfig.py +13 -0
  5. sycommon/database/async_base_db_service.py +36 -0
  6. sycommon/database/async_database_service.py +96 -0
  7. sycommon/llm/__init__.py +0 -0
  8. sycommon/llm/embedding.py +204 -0
  9. sycommon/llm/get_llm.py +37 -0
  10. sycommon/llm/llm_logger.py +126 -0
  11. sycommon/llm/llm_tokens.py +119 -0
  12. sycommon/llm/struct_token.py +192 -0
  13. sycommon/llm/sy_langfuse.py +103 -0
  14. sycommon/llm/usage_token.py +117 -0
  15. sycommon/logging/async_sql_logger.py +65 -0
  16. sycommon/logging/kafka_log.py +200 -434
  17. sycommon/logging/logger_levels.py +23 -0
  18. sycommon/middleware/context.py +2 -0
  19. sycommon/middleware/exception.py +10 -16
  20. sycommon/middleware/timeout.py +2 -1
  21. sycommon/middleware/traceid.py +179 -51
  22. sycommon/notice/__init__.py +0 -0
  23. sycommon/notice/uvicorn_monitor.py +200 -0
  24. sycommon/rabbitmq/rabbitmq_client.py +267 -290
  25. sycommon/rabbitmq/rabbitmq_pool.py +277 -465
  26. sycommon/rabbitmq/rabbitmq_service.py +23 -891
  27. sycommon/rabbitmq/rabbitmq_service_client_manager.py +211 -0
  28. sycommon/rabbitmq/rabbitmq_service_connection_monitor.py +73 -0
  29. sycommon/rabbitmq/rabbitmq_service_consumer_manager.py +285 -0
  30. sycommon/rabbitmq/rabbitmq_service_core.py +117 -0
  31. sycommon/rabbitmq/rabbitmq_service_producer_manager.py +238 -0
  32. sycommon/sentry/__init__.py +0 -0
  33. sycommon/sentry/sy_sentry.py +35 -0
  34. sycommon/services.py +144 -115
  35. sycommon/synacos/feign.py +18 -7
  36. sycommon/synacos/feign_client.py +26 -8
  37. sycommon/synacos/nacos_client_base.py +119 -0
  38. sycommon/synacos/nacos_config_manager.py +107 -0
  39. sycommon/synacos/nacos_heartbeat_manager.py +144 -0
  40. sycommon/synacos/nacos_service.py +65 -769
  41. sycommon/synacos/nacos_service_discovery.py +157 -0
  42. sycommon/synacos/nacos_service_registration.py +270 -0
  43. sycommon/tools/env.py +62 -0
  44. sycommon/tools/merge_headers.py +117 -0
  45. sycommon/tools/snowflake.py +238 -23
  46. {sycommon_python_lib-0.1.46.dist-info → sycommon_python_lib-0.1.57b1.dist-info}/METADATA +18 -11
  47. sycommon_python_lib-0.1.57b1.dist-info/RECORD +89 -0
  48. sycommon_python_lib-0.1.46.dist-info/RECORD +0 -59
  49. {sycommon_python_lib-0.1.46.dist-info → sycommon_python_lib-0.1.57b1.dist-info}/WHEEL +0 -0
  50. {sycommon_python_lib-0.1.46.dist-info → sycommon_python_lib-0.1.57b1.dist-info}/entry_points.txt +0 -0
  51. {sycommon_python_lib-0.1.46.dist-info → sycommon_python_lib-0.1.57b1.dist-info}/top_level.txt +0 -0
sycommon/logging/kafka_log.py
@@ -1,21 +1,18 @@
  import os
- import pprint
  import sys
- import traceback
- import asyncio
- import atexit
- from datetime import datetime
  import json
- import re
  import socket
- import time
  import threading
- from queue import Queue, Full, Empty
+ import traceback
+ import asyncio
+ from datetime import datetime
+
  from kafka import KafkaProducer
  from loguru import logger
- import loguru
+
  from sycommon.config.Config import Config, SingletonMeta
- from sycommon.middleware.context import current_trace_id
+ from sycommon.middleware.context import current_trace_id, current_headers
+ from sycommon.tools.env import check_env_flag
  from sycommon.tools.snowflake import Snowflake

  # Loguru color scheme configuration
@@ -27,480 +24,270 @@ LOGURU_FORMAT = (
  )


- class KafkaLogger(metaclass=SingletonMeta):
-     _producer = None
-     _topic = None
-     _service_id = None
-     _log_queue = Queue(maxsize=10000)
-     _stop_event = threading.Event()
-     _sender_thread = None
-     _log_pattern = re.compile(
-         r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+)\s*\|\s*(\w+)\s*\|\s*(\S+):(\S+):(\d+)\s*-\s*(\{.*\})\s*$'
-     )
-     _queue_warning_threshold = 9000
-     _queue_warning_interval = 60  # seconds
-     _last_queue_warning = 0
-     _shutdown_timeout = 15  # shutdown timeout, seconds
-     _config = None  # stored configuration
-
-     @staticmethod
-     def setup_logger(config: dict):
-         # save the config in a class variable
-         KafkaLogger._config = config
+ class KafkaSink:
+     """
+     Custom Loguru sink that formats log records and sends them to Kafka.
+     """

+     def __init__(self, service_id: str):
+         self.service_id = service_id
+         # fetch configuration
          from sycommon.synacos.nacos_service import NacosService
-         KafkaLogger._topic = "shengye-json-log"
-         KafkaLogger._service_id = NacosService(config).service_name
-
-         # fetch the common config
-         common = NacosService(config).share_configs.get("common.yml", {})
+         common = NacosService(
+             Config().config).share_configs.get("common.yml", {})
          bootstrap_servers = common.get("log", {}).get(
              "kafka", {}).get("servers", None)

-         # create the producer with tuned parameters
-         KafkaLogger._producer = KafkaProducer(
+         self._producer = KafkaProducer(
              bootstrap_servers=bootstrap_servers,
              value_serializer=lambda v: json.dumps(
                  v, ensure_ascii=False).encode('utf-8'),
-             max_block_ms=60000,  # raise the max block time from 30s to 60s
-             retries=10,  # raise retries from 5 to 10
-             request_timeout_ms=30000,  # raise the request timeout from 10s to 30s
-             compression_type='gzip',  # compress to reduce network traffic
-             batch_size=16384,  # larger batch size
-             linger_ms=5,  # short delay to allow more batching
-             buffer_memory=67108864,  # larger buffer memory
-             connections_max_idle_ms=540000,  # max idle time per connection
-             reconnect_backoff_max_ms=10000,  # raise the max reconnect backoff
-             max_in_flight_requests_per_connection=1,  # cap unacknowledged requests per connection
-             # enable_idempotence=True,  # enable idempotence
+             # keep the original tuned settings
+             max_block_ms=60000,
+             retries=5,
+             request_timeout_ms=30000,
+             compression_type='gzip',
+             batch_size=16384,
+             linger_ms=5,
+             buffer_memory=33554432,
          )

-         # start the background sender thread
-         KafkaLogger._sender_thread = threading.Thread(
-             target=KafkaLogger._send_logs,
-             daemon=True
-         )
-         KafkaLogger._sender_thread.start()
+     def write(self, message):
+         """
+         Called by Loguru.
+         The message argument is actually a loguru.Message object; every field is available via message.record.
+         """
+         try:
+             # 1. get the raw log record
+             record = message.record

-         # register the exit handler
-         atexit.register(KafkaLogger.close)
+             # 2. extract the trace ID
+             trace_id = None
+             try:
+                 # the caller may pass a JSON string as the message
+                 msg_obj = json.loads(record["message"])
+                 if isinstance(msg_obj, dict):
+                     trace_id = msg_obj.get("trace_id")
+             except:
+                 pass
+
+             if not trace_id:
+                 trace_id = current_trace_id.get()
+
+             if not trace_id:
+                 trace_id = str(Snowflake.id)
+             else:
+                 trace_id = str(trace_id)
+
+             # 3. extract exception details (if any)
+             error_detail = ""
+             if record["exception"] is not None:
+                 # Loguru's exception object
+                 error_detail = "".join(traceback.format_exception(
+                     record["exception"].type,
+                     record["exception"].value,
+                     record["exception"].traceback
+                 ))
+             elif "error" in record["extra"]:
+                 # compatibility with exceptions injected some other way
+                 error_detail = str(record["extra"].get("error"))
+
+             # 4. get host info
+             try:
+                 ip = socket.gethostbyname(socket.gethostname())
+             except:
+                 ip = '127.0.0.1'
+             host_name = socket.gethostname()

-         # install the global exception handler
-         sys.excepthook = KafkaLogger._handle_exception
+             # 5. get thread/coroutine info
+             try:
+                 task = asyncio.current_task()
+                 thread_info = f"coroutine:{task.get_name()}" if task else f"thread:{threading.current_thread().name}"
+             except RuntimeError:
+                 thread_info = f"thread:{threading.current_thread().name}"
+
+             # 6. extract class/file name info
+             file_name = record["file"].name
+             logger_name = record["name"]
+             if logger_name and logger_name != file_name:
+                 class_name = f"{file_name}:{logger_name}"
+             else:
+                 class_name = file_name
+
+             # 7. build the final Kafka log structure
+             log_entry = {
+                 "traceId": trace_id,
+                 "sySpanId": "",
+                 "syBizId": "",
+                 "ptxId": "",
+                 "time": record["time"].strftime("%Y-%m-%d %H:%M:%S"),
+                 "day": datetime.now().strftime("%Y.%m.%d"),
+                 "msg": record["message"],
+                 "detail": error_detail,
+                 "ip": ip,
+                 "hostName": host_name,
+                 "tenantId": "",
+                 "userId": "",
+                 "customerId": "",
+                 "env": Config().config.get('Nacos', {}).get('namespaceId', ''),
+                 "priReqSource": "",
+                 "reqSource": "",
+                 "serviceId": self.service_id,
+                 "logLevel": record["level"].name,
+                 "className": class_name,
+                 "method": record["function"],
+                 "line": str(record["line"]),
+                 "theadName": thread_info,
+                 "sqlCost": 0,
+                 "size": len(str(record["message"])),
+                 "uid": int(Snowflake.id)
+             }

-         def custom_log_handler(record):
-             # check whether record is a Message object
-             if isinstance(record, loguru._handler.Message):
-                 # pull the raw log record out of the Message object
-                 record = record.record
-
-             # extract basic fields
-             message = record["message"]
-             level = record["level"].name
-             time_str = record["time"].strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
-
-             # extract file, function and line number info
-             file_info = record["file"].name
-             function_info = record["function"]
-             line_info = record["line"]
-
-             # try to extract trace_id from the message
-             trace_id = None
-             try:
-                 if isinstance(message, str):
-                     msg_dict = json.loads(message)
-                     trace_id = msg_dict.get("trace_id")
-             except json.JSONDecodeError:
-                 trace_id = None
-
-             if not trace_id:
-                 trace_id = SYLogger.get_trace_id() or Snowflake.next_id()
-
-             # get thread/coroutine info
-             thread_info = SYLogger._get_execution_context()
-
-             # get host info
-             try:
-                 ip = socket.gethostbyname(socket.gethostname())
-             except socket.gaierror:
-                 ip = '127.0.0.1'
-             host_name = socket.gethostname()
-
-             # check for error info and populate the detail field
-             error_detail = ""
-             if level == "ERROR" and record["exception"] is not None:
-                 error_detail = "".join(traceback.format_exception(
-                     record["exception"].type,
-                     record["exception"].value,
-                     record["exception"].traceback
-                 ))
-
-             # use the logger name as the class name
-             class_name = record["name"]
-
-             # merge file name and class name info
-             if file_info and class_name:
-                 full_class_name = f"{file_info}:{class_name}"
-             elif file_info:
-                 full_class_name = file_info
-             else:
-                 full_class_name = class_name
-
-             # build the log entry
-             log_entry = {
-                 "traceId": trace_id,
-                 "sySpanId": "",
-                 "syBizId": "",
-                 "ptxId": "",
-                 "time": time_str,
-                 "day": datetime.now().strftime("%Y.%m.%d"),
-                 "msg": message,
-                 "detail": error_detail,
-                 "ip": ip,
-                 "hostName": host_name,
-                 "tenantId": "",
-                 "userId": "",
-                 "customerId": "",
-                 "env": Config().config['Nacos']['namespaceId'],
-                 "priReqSource": "",
-                 "reqSource": "",
-                 "serviceId": KafkaLogger._service_id,
-                 "logLevel": level,
-                 "classShortName": "",
-                 "method": "",
-                 "line": "",
-                 "theadName": thread_info,
-                 "className": "",
-                 "sqlCost": 0,
-                 "size": len(str(message)),
-                 "uid": int(Snowflake.next_id())  # independent fresh id
-             }
-
-             # smart queue management
-             if not KafkaLogger._safe_put_to_queue(log_entry):
-                 logger.warning(json.dumps({
-                     "trace_id": trace_id,
-                     "message": "Log queue is full, log discarded",
-                     "level": "WARNING"
-                 }, ensure_ascii=False))
-
-         # configure log handlers
+             # 8. send
+             self._producer.send("shengye-json-log", log_entry)
+
+         except Exception as e:
+             print(f"KafkaSink Error: {e}")
+
+     def flush(self):
+         if self._producer:
+             self._producer.flush(timeout=5)
+
+
+ class KafkaLogger(metaclass=SingletonMeta):
+     _sink_instance = None
+
+     @staticmethod
+     def setup_logger(config: dict):
          logger.remove()

-         # add the Kafka log handler
+         from sycommon.synacos.nacos_service import NacosService
+         service_id = NacosService(config).service_name
+
+         KafkaLogger._sink_instance = KafkaSink(service_id)
+
          logger.add(
-             custom_log_handler,
+             KafkaLogger._sink_instance,
              level="INFO",
-             enqueue=True  # use Loguru's internal queue
+             format="{message}",
+             enqueue=True,
+             backtrace=True,
+             diagnose=True
          )

-         # add the console error log handler
          logger.add(
              sink=sys.stdout,
              level="ERROR",
              format=LOGURU_FORMAT,
-             colorize=True,  # enable colors
-             filter=lambda record: record["level"].name == "ERROR"
+             colorize=True,
+             backtrace=True,
+             diagnose=True
          )

+         sys.excepthook = KafkaLogger._handle_exception
+
      @staticmethod
      def _handle_exception(exc_type, exc_value, exc_traceback):
-         """Global exception handler"""
-         # skip keyboard interrupt (Ctrl+C)
          if issubclass(exc_type, KeyboardInterrupt):
              sys.__excepthook__(exc_type, exc_value, exc_traceback)
              return

-         # get the current trace_id
-         trace_id = SYLogger.get_trace_id() or Snowflake.next_id()
-
-         # build the error log
-         error_log = {
+         trace_id = current_trace_id.get() or str(Snowflake.id)
+         error_msg = json.dumps({
              "trace_id": trace_id,
-             "message": f"Uncaught exception: {exc_type.__name__}: {str(exc_value)}",
-             "level": "ERROR",
-             "detail": "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
-         }
+             "message": f"Uncaught exception: {exc_type.__name__}",
+             "level": "ERROR"
+         }, ensure_ascii=False)

-         # log via Loguru, making sure the full stack trace is included
-         logger.opt(exception=(exc_type, exc_value, exc_traceback)).error(
-             json.dumps(error_log, ensure_ascii=False)
-         )
-
-     @staticmethod
-     def _safe_put_to_queue(log_entry):
-         """Put into the queue safely, with more robust queue management"""
-         try:
-             # check the queue watermark and warn
-             current_time = time.time()
-             qsize = KafkaLogger._log_queue.qsize()
-
-             if qsize > KafkaLogger._queue_warning_threshold:
-                 if current_time - KafkaLogger._last_queue_warning > KafkaLogger._queue_warning_interval:
-                     warning_msg = f"Log queue at {qsize}/{KafkaLogger._log_queue.maxsize} capacity"
-                     print(warning_msg)
-                     logger.warning(json.dumps({
-                         "trace_id": log_entry.get("traceId"),
-                         "message": warning_msg,
-                         "level": "WARNING"
-                     }, ensure_ascii=False))
-                     KafkaLogger._last_queue_warning = current_time
-
-             # try a fast, non-blocking put
-             KafkaLogger._log_queue.put(log_entry, block=False)
-             return True
-         except Full:
-             # strategy when the queue is full
-             if KafkaLogger._stop_event.is_set():
-                 # drop logs outright during shutdown
-                 return False
-
-             # drop the oldest log and enqueue the new one
-             try:
-                 with threading.Lock():  # lock to keep the operation atomic
-                     if not KafkaLogger._log_queue.empty():
-                         KafkaLogger._log_queue.get_nowait()
-                     KafkaLogger._log_queue.put_nowait(log_entry)
-                     return True
-             except Exception:
-                 return False
-
-     @staticmethod
-     def _send_logs():
-         """Background thread: send logs to Kafka in batches, with memory use optimized"""
-         batch = []
-         last_flush = time.time()
-         batch_size = 100
-         flush_interval = 1  # seconds
-         consecutive_errors = 0
-         max_consecutive_errors = 10  # max consecutive errors before throttling
-         last_reconnect_attempt = 0
-         reconnect_interval = 30  # reconnect attempt interval, seconds
-
-         while not KafkaLogger._stop_event.is_set():
-             try:
-                 # check producer health; recreate it after sustained failures
-                 current_time = time.time()
-                 if consecutive_errors > max_consecutive_errors and current_time - last_reconnect_attempt > reconnect_interval:
-                     logger.warning(json.dumps({
-                         "trace_id": "system",
-                         "message": "尝试重新创建Kafka生产者以解决连接问题",
-                         "level": "WARNING"
-                     }, ensure_ascii=False))
-                     last_reconnect_attempt = current_time
-
-                     # try to recreate the producer
-                     try:
-                         # use the config stored in the class variable
-                         from sycommon.synacos.nacos_service import NacosService
-                         common = NacosService(
-                             KafkaLogger._config).share_configs.get("common.yml", {})
-                         bootstrap_servers = common.get("log", {}).get(
-                             "kafka", {}).get("servers", None)
-
-                         # close the old producer
-                         if KafkaLogger._producer:
-                             KafkaLogger._producer.close(timeout=5)
-
-                         # create a new producer
-                         KafkaLogger._producer = KafkaProducer(
-                             bootstrap_servers=bootstrap_servers,
-                             value_serializer=lambda v: json.dumps(
-                                 v, ensure_ascii=False).encode('utf-8'),
-                             max_block_ms=60000,
-                             retries=10,
-                             request_timeout_ms=30000,
-                             compression_type='gzip',
-                             batch_size=16384,
-                             linger_ms=5,
-                             buffer_memory=67108864,
-                             connections_max_idle_ms=540000,
-                             reconnect_backoff_max_ms=10000,
-                         )
-                         consecutive_errors = 0
-                         logger.info(json.dumps({
-                             "trace_id": "system",
-                             "message": "Kafka生产者已重新创建",
-                             "level": "INFO"
-                         }, ensure_ascii=False))
-                     except Exception as e:
-                         logger.error(json.dumps({
-                             "trace_id": "system",
-                             "message": f"重新创建Kafka生产者失败: {str(e)}",
-                             "level": "ERROR"
-                         }, ensure_ascii=False))
-
-                 # fetch logs in batches
-                 while len(batch) < batch_size and not KafkaLogger._stop_event.is_set():
-                     try:
-                         # fetch with a timeout to avoid blocking for long
-                         log_entry = KafkaLogger._log_queue.get(timeout=0.5)
-                         batch.append(log_entry)
-                     except Empty:
-                         break
-
-                 # send on a timer or once the batch is full
-                 current_time = time.time()
-                 if batch and (len(batch) >= batch_size or (current_time - last_flush > flush_interval)):
-                     try:
-                         # send in sub-batches to avoid oversized sends
-                         sub_batch_size = min(50, batch_size)
-                         for i in range(0, len(batch), sub_batch_size):
-                             sub_batch = batch[i:i+sub_batch_size]
-                             for entry in sub_batch:
-                                 KafkaLogger._producer.send(
-                                     KafkaLogger._topic, entry)
-                             KafkaLogger._producer.flush(timeout=15)
-
-                         batch = []  # clear the batch after a successful send
-                         last_flush = current_time
-                         consecutive_errors = 0  # reset the error counter
-                     except Exception as e:
-                         consecutive_errors += 1
-                         error_msg = f"Kafka发送失败: {e}"
-                         print(error_msg)
-                         logger.error(json.dumps({
-                             "trace_id": "system",
-                             "message": error_msg,
-                             "level": "ERROR"
-                         }, ensure_ascii=False))
-
-                         # sleep longer after many consecutive errors to avoid spinning the CPU
-                         if consecutive_errors > max_consecutive_errors:
-                             sleep_time = min(5, consecutive_errors // 2)
-                             time.sleep(sleep_time)
-
-             except Exception as e:
-                 print(f"日志处理线程异常: {e}")
-                 time.sleep(1)  # brief sleep to recover
-
-         # send any remaining logs before exiting
-         if batch:
-             try:
-                 for entry in batch:
-                     KafkaLogger._producer.send(KafkaLogger._topic, entry)
-                 KafkaLogger._producer.flush(
-                     timeout=KafkaLogger._shutdown_timeout)
-             except Exception as e:
-                 print(f"关闭时发送剩余日志失败: {e}")
+         logger.opt(exception=(exc_type, exc_value,
+                               exc_traceback)).error(error_msg)

      @staticmethod
      def close():
-         """Shut down resources safely, with extra reliability"""
-         if KafkaLogger._stop_event.is_set():
-             return
-
-         print("开始关闭Kafka日志系统...")
-         KafkaLogger._stop_event.set()
-
-         # wait for the sender thread to finish
-         if KafkaLogger._sender_thread and KafkaLogger._sender_thread.is_alive():
-             print(f"等待日志发送线程结束,超时时间: {KafkaLogger._shutdown_timeout}秒")
-             KafkaLogger._sender_thread.join(
-                 timeout=KafkaLogger._shutdown_timeout)
-
-             # if the thread is still alive, force termination (daemon threads exit automatically, but handle it explicitly here)
-             if KafkaLogger._sender_thread.is_alive():
-                 print("日志发送线程未能及时结束,将被强制终止")
-
-         # close the producer
-         if KafkaLogger._producer:
-             try:
-                 print("关闭Kafka生产者...")
-                 KafkaLogger._producer.close(
-                     timeout=KafkaLogger._shutdown_timeout)
-                 print("Kafka生产者已关闭")
-             except Exception as e:
-                 print(f"关闭Kafka生产者失败: {e}")
-
-         # drain the queue to avoid lingering memory
-         remaining = 0
-         while not KafkaLogger._log_queue.empty():
-             try:
-                 KafkaLogger._log_queue.get_nowait()
-                 remaining += 1
-             except Empty:
-                 break
-
-         print(f"已清空日志队列,剩余日志数: {remaining}")
+         if KafkaLogger._sink_instance:
+             KafkaLogger._sink_instance.flush()


  class SYLogger:
      @staticmethod
      def get_trace_id():
-         """Get the current trace_id from the context"""
          return current_trace_id.get()

      @staticmethod
      def set_trace_id(trace_id: str):
-         """Set the current trace_id"""
          return current_trace_id.set(trace_id)

      @staticmethod
      def reset_trace_id(token):
-         """Reset the current trace_id"""
          current_trace_id.reset(token)

+     @staticmethod
+     def get_headers():
+         return current_headers.get()
+
+     @staticmethod
+     def set_headers(headers: list[tuple[str, str]]):
+         return current_headers.set(headers)
+
+     @staticmethod
+     def reset_headers(token):
+         current_headers.reset(token)
+
      @staticmethod
      def _get_execution_context() -> str:
-         """Get thread or coroutine info for the current execution context, formatted as a string"""
          try:
-             # try to get coroutine info
              task = asyncio.current_task()
              if task:
-                 task_name = task.get_name()
-                 return f"coroutine:{task_name}"
+                 return f"coroutine:{task.get_name()}"
          except RuntimeError:
-             # not in an async context; fall back to thread info
-             thread = threading.current_thread()
-             return f"thread:{thread.name}"
-
-         return "unknown"
+             pass
+         return f"thread:{threading.current_thread().name}"

      @staticmethod
      def _log(msg: any, level: str = "INFO"):
-         trace_id = SYLogger.get_trace_id()
-
+         """
+         Unified logging entry point.
+         Fix: extract the stack manually and embed it in the message so Kafka receives it.
+         """
+         # serialize the message
          if isinstance(msg, dict) or isinstance(msg, list):
              msg_str = json.dumps(msg, ensure_ascii=False)
          else:
              msg_str = str(msg)

-         # get execution context info formatted as a string
-         thread_info = SYLogger._get_execution_context()
+         # build the base log dict
+         log_dict = {
+             "trace_id": str(SYLogger.get_trace_id() or Snowflake.id),
+             "message": msg_str,
+             "level": level,
+             "threadName": SYLogger._get_execution_context()
+         }

-         # build the log structure, adding thread/coroutine info in threadName
-         request_log = {}
+         # for ERROR level, capture the stack manually and add it to log_dict
          if level == "ERROR":
-             request_log = {
-                 "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
-                 "message": msg_str,
-                 "traceback": traceback.format_exc(),
-                 "level": level,
-                 "threadName": thread_info
-             }
-         else:
-             request_log = {
-                 "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
-                 "message": msg_str,
-                 "level": level,
-                 "threadName": thread_info
-             }
+             # get the current exception (sys.exc_info() is valid inside an except block)
+             exc_info = sys.exc_info()
+             if exc_info and exc_info[0] is not None:
+                 # format the stack into a string stored in the detail field,
+                 # so KafkaSink can pick up detail when it parses the message
+                 tb_str = "".join(traceback.format_exception(*exc_info))
+                 log_dict["detail"] = tb_str
+
+         # convert the dict to a JSON string for Loguru
+         log_json = json.dumps(log_dict, ensure_ascii=False)

-         # pick the log level
-         _log = ''
          if level == "ERROR":
-             _log = json.dumps(request_log, ensure_ascii=False)
-             logger.error(_log)
+             # still use opt(exception=True) so the console prints a colored stack trace;
+             # Loguru may internally ignore the detail string we embedded, which is fine
+             # because KafkaSink re-reads detail when it parses the message string
+             logger.opt(exception=True).error(log_json)
          elif level == "WARNING":
-             _log = json.dumps(request_log, ensure_ascii=False)
-             logger.warning(_log)
+             logger.warning(log_json)
          else:
-             _log = json.dumps(request_log, ensure_ascii=False)
-             logger.info(_log)
+             logger.info(log_json)

-         if os.getenv('DEV-LOG', 'false').lower() == 'true':
-             pprint.pprint(_log)
+         if check_env_flag(['DEV-LOG']):
+             print(log_json)

      @staticmethod
      def info(msg: any, *args, **kwargs):
@@ -520,25 +307,4 @@ class SYLogger:

      @staticmethod
      def exception(msg: any, *args, **kwargs):
-         """Log an exception, including the full stack"""
-         trace_id = SYLogger.get_trace_id()
-
-         if isinstance(msg, dict) or isinstance(msg, list):
-             msg_str = json.dumps(msg, ensure_ascii=False)
-         else:
-             msg_str = str(msg)
-
-         # get execution context info
-         thread_info = SYLogger._get_execution_context()
-
-         # build the log including the exception stack
-         request_log = {
-             "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
-             "message": msg_str,
-             "level": "ERROR",
-             "threadName": thread_info
-         }
-
-         # log the full exception stack via Loguru
-         logger.opt(exception=True).error(
-             json.dumps(request_log, ensure_ascii=False))
+         SYLogger._log(msg, "ERROR")
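
For orientation, a minimal usage sketch of the reworked logging API after this diff. This is a sketch under assumptions, not code from the package: the contents of the config dict shown here are hypothetical (whatever Config/NacosService expect in a given deployment), and the Kafka bootstrap servers are resolved from the shared "common.yml" Nacos config as the diff shows.

from sycommon.logging.kafka_log import KafkaLogger, SYLogger

# hypothetical config shape; real values come from the Nacos deployment
config = {"Nacos": {"namespaceId": "dev"}}

# installs the KafkaSink (INFO and above) and the colorized console sink (ERROR)
KafkaLogger.setup_logger(config)

token = SYLogger.set_trace_id("42")  # bind a trace id to the current context
try:
    SYLogger.info({"event": "start"})  # dict/list payloads are JSON-serialized
    raise ValueError("boom")
except ValueError:
    # inside an except block, _log() captures sys.exc_info() and embeds the
    # stack in the "detail" field, which KafkaSink recovers from the message
    SYLogger.exception("operation failed")
finally:
    SYLogger.reset_trace_id(token)
    KafkaLogger.close()  # flushes the Kafka producer

Note the design shift this diff makes: the hand-rolled bounded queue, sender thread, and reconnect loop of the old KafkaLogger are replaced by a Loguru sink registered with enqueue=True, so Loguru's own queue decouples callers from the Kafka producer.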