sycommon-python-lib 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sycommon-python-lib might be problematic.
- sycommon/__init__.py +0 -0
- sycommon/config/Config.py +73 -0
- sycommon/config/DatabaseConfig.py +34 -0
- sycommon/config/EmbeddingConfig.py +16 -0
- sycommon/config/LLMConfig.py +16 -0
- sycommon/config/RerankerConfig.py +13 -0
- sycommon/config/__init__.py +0 -0
- sycommon/database/database_service.py +79 -0
- sycommon/health/__init__.py +0 -0
- sycommon/health/health_check.py +17 -0
- sycommon/health/ping.py +13 -0
- sycommon/logging/__init__.py +0 -0
- sycommon/logging/kafka_log.py +551 -0
- sycommon/logging/logger_wrapper.py +19 -0
- sycommon/middleware/__init__.py +0 -0
- sycommon/middleware/context.py +3 -0
- sycommon/middleware/cors.py +14 -0
- sycommon/middleware/exception.py +85 -0
- sycommon/middleware/middleware.py +32 -0
- sycommon/middleware/monitor_memory.py +22 -0
- sycommon/middleware/timeout.py +19 -0
- sycommon/middleware/traceid.py +138 -0
- sycommon/models/__init__.py +0 -0
- sycommon/models/log.py +30 -0
- sycommon/services.py +29 -0
- sycommon/synacos/__init__.py +0 -0
- sycommon/synacos/feign.py +307 -0
- sycommon/synacos/nacos_service.py +689 -0
- sycommon/tools/__init__.py +0 -0
- sycommon/tools/snowflake.py +11 -0
- sycommon/tools/timing.py +73 -0
- sycommon_python_lib-0.1.0.dist-info/METADATA +128 -0
- sycommon_python_lib-0.1.0.dist-info/RECORD +35 -0
- sycommon_python_lib-0.1.0.dist-info/WHEEL +5 -0
- sycommon_python_lib-0.1.0.dist-info/top_level.txt +1 -0

sycommon/logging/kafka_log.py

@@ -0,0 +1,551 @@
import sys
import traceback
import asyncio
import atexit
from datetime import datetime
import json
import re
import socket
import time
import threading
from typing import Any

import loguru
from queue import Queue, Full, Empty
from kafka import KafkaProducer
from loguru import logger
from sycommon.config.Config import Config, SingletonMeta
from sycommon.middleware.context import current_trace_id
from sycommon.tools.snowflake import Snowflake

# Loguru console color scheme
LOGURU_FORMAT = (
    "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
    "<level>{level: <8}</level> | "
    "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - "
    "<level>{message}</level>"
)


class KafkaLogger(metaclass=SingletonMeta):
    _producer = None
    _topic = None
    _service_id = None
    _log_queue = Queue(maxsize=10000)
    _stop_event = threading.Event()
    _sender_thread = None
    _queue_lock = threading.Lock()  # shared lock for the drop-oldest swap in _safe_put_to_queue
    _log_pattern = re.compile(
        r'^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+)\s*\|\s*(\w+)\s*\|\s*(\S+):(\S+):(\d+)\s*-\s*(\{.*\})\s*$'
    )
    _queue_warning_threshold = 9000
    _queue_warning_interval = 60  # seconds
    _last_queue_warning = 0
    _shutdown_timeout = 15  # shutdown timeout, seconds
    _config = None  # stores the config passed to setup_logger

    @staticmethod
    def setup_logger(config: dict):
        # Keep the config on the class so the sender thread can rebuild the producer
        KafkaLogger._config = config

        from sycommon.synacos.nacos_service import NacosService
        KafkaLogger._topic = "shengye-json-log"
        KafkaLogger._service_id = NacosService(config).service_name

        # Fetch the shared "common" config
        common = NacosService(config).share_configs.get("common.yml", {})
        bootstrap_servers = common.get("log", {}).get(
            "kafka", {}).get("servers", None)

        # Create the producer with tuned parameters
        KafkaLogger._producer = KafkaProducer(
            bootstrap_servers=bootstrap_servers,
            value_serializer=lambda v: json.dumps(
                v, ensure_ascii=False).encode('utf-8'),
            max_block_ms=60000,              # max blocking time, raised from 30s to 60s
            retries=10,                      # retries, raised from 5 to 10
            request_timeout_ms=30000,        # request timeout, raised from 10s to 30s
            compression_type='gzip',         # compress to reduce network traffic
            batch_size=16384,                # larger batches
            linger_ms=5,                     # small delay so more messages batch together
            buffer_memory=67108864,          # larger buffer memory
            connections_max_idle_ms=540000,  # max connection idle time
            reconnect_backoff_max_ms=10000,  # max reconnect backoff
        )

        # Start the background sender thread
        KafkaLogger._sender_thread = threading.Thread(
            target=KafkaLogger._send_logs,
            daemon=True
        )
        KafkaLogger._sender_thread.start()

        # Register the exit handler
        atexit.register(KafkaLogger.close)

        # Install the global exception hook
        sys.excepthook = KafkaLogger._handle_exception

        def pars_log(record):
            # Parse a formatted log line back into its structured fields
            match = KafkaLogger._log_pattern.match(record.strip())
            if match:
                time_str = match.group(1)
                level = match.group(2)
                module = match.group(3)
                function = match.group(4)
                line = int(match.group(5))
                message_str = match.group(6)

                try:
                    message = json.loads(message_str)
                except json.JSONDecodeError:
                    message = message_str

                return {
                    "time": time_str,
                    "level": level,
                    "module": module,
                    "function": function,
                    "line": line,
                    "message": message
                }
            else:
                # Use the current time instead of rebuilding the same time string
                current_time = datetime.now()
                return {
                    "message": record,
                    "level": "ERROR",
                    "time": current_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
                }

        def custom_log_handler(record):
            # Sinks registered with enqueue=True receive Message objects
            if isinstance(record, loguru._handler.Message):
                # Pull the original log record off the Message object
                record = record.record

            # Extract the basics
            message = record["message"]
            level = record["level"].name
            time_str = record["time"].strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]

            # Extract file, function and line information
            file_info = record["file"].name
            function_info = record["function"]
            line_info = record["line"]

            # Try to pull a trace_id out of the message body
            trace_id = None
            try:
                if isinstance(message, str):
                    msg_dict = json.loads(message)
                    trace_id = msg_dict.get("trace_id")
            except json.JSONDecodeError:
                trace_id = None

            if not trace_id:
                trace_id = SYLogger.get_trace_id() or Snowflake.next_id()

            # Thread/coroutine context
            thread_info = SYLogger._get_execution_context()

            # Host information
            try:
                ip = socket.gethostbyname(socket.gethostname())
            except socket.gaierror:
                ip = '127.0.0.1'
            host_name = socket.gethostname()

            # If an exception is attached, fill the detail field
            error_detail = ""
            if level == "ERROR" and record.get("exception"):
                error_detail = "".join(traceback.format_exception(
                    record["exception"].type,
                    record["exception"].value,
                    record["exception"].traceback
                ))

            # Use the logger name as the class name
            class_name = record["name"]

            # Merge file name and class name
            if file_info and class_name:
                full_class_name = f"{file_info}:{class_name}"
            elif file_info:
                full_class_name = file_info
            else:
                full_class_name = class_name

            # Build the log entry
            log_entry = {
                "traceId": trace_id,
                "sySpanId": "",
                "syBizId": "",
                "ptxId": "",
                "time": time_str,
                "day": datetime.now().strftime("%Y.%m.%d"),
                "msg": message,
                "detail": error_detail,
                "ip": ip,
                "hostName": host_name,
                "tenantId": "",
                "userId": "",
                "customerId": "",
                "env": Config().config['Nacos']['namespaceId'],
                "priReqSource": "",
                "reqSource": "",
                "serviceId": KafkaLogger._service_id,
                "logLevel": level,
                "classShortName": "",
                "method": "",
                "line": "",
                "theadName": thread_info,  # key spelling kept as-is; it presumably matches the consumer's schema
                "className": "",
                "sqlCost": 0,
                "size": len(str(message)),
                # str() guards against Snowflake.next_id() returning an int
                "uid": int(trace_id) if trace_id and str(trace_id).isdigit() else 0
            }

            # Smart queue management: warn and drop if the entry is rejected
            if not KafkaLogger._safe_put_to_queue(log_entry):
                logger.warning(json.dumps({
                    "trace_id": trace_id,
                    "message": "Log queue is full, log discarded",
                    "level": "WARNING"
                }, ensure_ascii=False))

        # Configure the log handlers
        logger.remove()

        # Kafka handler
        logger.add(
            custom_log_handler,
            level="INFO",
            enqueue=True  # use Loguru's internal queue
        )

        # Console handler for errors
        logger.add(
            sink=sys.stdout,
            level="ERROR",
            format=LOGURU_FORMAT,
            colorize=True,  # enable colors
            filter=lambda record: record["level"].name == "ERROR"
        )

    @staticmethod
    def _handle_exception(exc_type, exc_value, exc_traceback):
        """Global exception hook."""
        # Let KeyboardInterrupt (Ctrl+C) pass through
        if issubclass(exc_type, KeyboardInterrupt):
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return

        # Current trace_id, or a fresh snowflake id
        trace_id = SYLogger.get_trace_id() or Snowflake.next_id()

        # Build the error log
        error_log = {
            "trace_id": trace_id,
            "message": f"Uncaught exception: {exc_type.__name__}: {str(exc_value)}",
            "level": "ERROR",
            "detail": "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        }

        # Log through Loguru so the full stack trace is attached
        logger.opt(exception=(exc_type, exc_value, exc_traceback)).error(
            json.dumps(error_log, ensure_ascii=False)
        )

    @staticmethod
    def _safe_put_to_queue(log_entry):
        """Put an entry on the queue with more robust queue management."""
        try:
            # Check the queue level and warn when it runs high
            current_time = time.time()
            qsize = KafkaLogger._log_queue.qsize()

            if qsize > KafkaLogger._queue_warning_threshold:
                if current_time - KafkaLogger._last_queue_warning > KafkaLogger._queue_warning_interval:
                    warning_msg = f"Log queue at {qsize}/{KafkaLogger._log_queue.maxsize} capacity"
                    print(warning_msg)
                    logger.warning(json.dumps({
                        "trace_id": log_entry.get("traceId"),
                        "message": warning_msg,
                        "level": "WARNING"
                    }, ensure_ascii=False))
                    KafkaLogger._last_queue_warning = current_time

            # Fast path: non-blocking put
            KafkaLogger._log_queue.put(log_entry, block=False)
            return True
        except Full:
            # Strategy when the queue is full
            if KafkaLogger._stop_event.is_set():
                # Drop logs outright while shutting down
                return False

            # Drop the oldest entry and enqueue the new one
            try:
                # The class-level lock makes the get/put swap atomic
                # (the original `with threading.Lock():` created a fresh lock
                # per call and therefore guarded nothing)
                with KafkaLogger._queue_lock:
                    if not KafkaLogger._log_queue.empty():
                        KafkaLogger._log_queue.get_nowait()
                    KafkaLogger._log_queue.put_nowait(log_entry)
                    return True
            except Exception:
                return False

    @staticmethod
    def _send_logs():
        """Background thread: send logs to Kafka in batches, keeping memory use low."""
        batch = []
        last_flush = time.time()
        batch_size = 100
        flush_interval = 1  # seconds
        consecutive_errors = 0
        max_consecutive_errors = 10  # above this, slow down and try to recover
        last_reconnect_attempt = 0
        reconnect_interval = 30  # seconds between reconnect attempts

        while not KafkaLogger._stop_event.is_set():
            try:
                # If sends keep failing, periodically rebuild the producer
                current_time = time.time()
                if consecutive_errors > max_consecutive_errors and current_time - last_reconnect_attempt > reconnect_interval:
                    logger.warning(json.dumps({
                        "trace_id": "system",
                        "message": "Recreating the Kafka producer to recover the connection",
                        "level": "WARNING"
                    }, ensure_ascii=False))
                    last_reconnect_attempt = current_time

                    # Try to recreate the producer
                    try:
                        # Reuse the config stored on the class
                        from sycommon.synacos.nacos_service import NacosService
                        common = NacosService(
                            KafkaLogger._config).share_configs.get("common.yml", {})
                        bootstrap_servers = common.get("log", {}).get(
                            "kafka", {}).get("servers", None)

                        # Close the old producer
                        if KafkaLogger._producer:
                            KafkaLogger._producer.close(timeout=5)

                        # Create a new producer with the same settings as setup_logger
                        KafkaLogger._producer = KafkaProducer(
                            bootstrap_servers=bootstrap_servers,
                            value_serializer=lambda v: json.dumps(
                                v, ensure_ascii=False).encode('utf-8'),
                            max_block_ms=60000,
                            retries=10,
                            request_timeout_ms=30000,
                            compression_type='gzip',
                            batch_size=16384,
                            linger_ms=5,
                            buffer_memory=67108864,
                            connections_max_idle_ms=540000,
                            reconnect_backoff_max_ms=10000,
                        )
                        consecutive_errors = 0
                        logger.info(json.dumps({
                            "trace_id": "system",
                            "message": "Kafka producer recreated",
                            "level": "INFO"
                        }, ensure_ascii=False))
                    except Exception as e:
                        logger.error(json.dumps({
                            "trace_id": "system",
                            "message": f"Failed to recreate the Kafka producer: {str(e)}",
                            "level": "ERROR"
                        }, ensure_ascii=False))

                # Drain the queue into a batch
                while len(batch) < batch_size and not KafkaLogger._stop_event.is_set():
                    try:
                        # Use a timeout so the loop never blocks for long
                        log_entry = KafkaLogger._log_queue.get(timeout=0.5)
                        batch.append(log_entry)
                    except Empty:
                        break

                # Flush on size or on interval
                current_time = time.time()
                if batch and (len(batch) >= batch_size or (current_time - last_flush > flush_interval)):
                    try:
                        # Send in sub-batches to avoid one oversized send
                        sub_batch_size = min(50, batch_size)
                        for i in range(0, len(batch), sub_batch_size):
                            sub_batch = batch[i:i+sub_batch_size]
                            for entry in sub_batch:
                                KafkaLogger._producer.send(
                                    KafkaLogger._topic, entry)
                            KafkaLogger._producer.flush(timeout=15)

                        batch = []  # clear the batch after a successful send
                        last_flush = current_time
                        consecutive_errors = 0  # reset the error counter
                    except Exception as e:
                        consecutive_errors += 1
                        error_msg = f"Kafka send failed: {e}"
                        print(error_msg)
                        logger.error(json.dumps({
                            "trace_id": "system",
                            "message": error_msg,
                            "level": "ERROR"
                        }, ensure_ascii=False))

                        # Back off when errors pile up, instead of spinning the CPU
                        if consecutive_errors > max_consecutive_errors:
                            sleep_time = min(5, consecutive_errors // 2)
                            time.sleep(sleep_time)

            except Exception as e:
                print(f"Log sender thread error: {e}")
                time.sleep(1)  # brief sleep before retrying

        # Flush whatever is left before exiting
        if batch:
            try:
                for entry in batch:
                    KafkaLogger._producer.send(KafkaLogger._topic, entry)
                KafkaLogger._producer.flush(
                    timeout=KafkaLogger._shutdown_timeout)
            except Exception as e:
                print(f"Failed to send remaining logs on shutdown: {e}")

    @staticmethod
    def close():
        """Shut resources down safely."""
        if KafkaLogger._stop_event.is_set():
            return

        print("Shutting down the Kafka logging system...")
        KafkaLogger._stop_event.set()

        # Wait for the sender thread to finish
        if KafkaLogger._sender_thread and KafkaLogger._sender_thread.is_alive():
            print(f"Waiting for the log sender thread, timeout: {KafkaLogger._shutdown_timeout}s")
            KafkaLogger._sender_thread.join(
                timeout=KafkaLogger._shutdown_timeout)

            # Daemon threads die with the process anyway; this just reports the timeout
            if KafkaLogger._sender_thread.is_alive():
                print("The log sender thread did not finish in time and will be terminated")

        # Close the producer
        if KafkaLogger._producer:
            try:
                print("Closing the Kafka producer...")
                KafkaLogger._producer.close(
                    timeout=KafkaLogger._shutdown_timeout)
                print("Kafka producer closed")
            except Exception as e:
                print(f"Failed to close the Kafka producer: {e}")

        # Drain the queue so nothing lingers in memory
        remaining = 0
        while not KafkaLogger._log_queue.empty():
            try:
                KafkaLogger._log_queue.get_nowait()
                remaining += 1
            except Empty:
                break

        print(f"Log queue drained, discarded entries: {remaining}")


class SYLogger:
    @staticmethod
    def get_trace_id():
        """Read the current trace_id from the context."""
        return current_trace_id.get()

    @staticmethod
    def set_trace_id(trace_id: str):
        """Set the current trace_id; returns the context reset token."""
        return current_trace_id.set(trace_id)

    @staticmethod
    def reset_trace_id(token):
        """Reset the current trace_id."""
        current_trace_id.reset(token)

    @staticmethod
    def _get_execution_context() -> str:
        """Describe the current thread or coroutine as a formatted string."""
        try:
            # Inside a running event loop, report the task
            task = asyncio.current_task()
            if task:
                task_name = task.get_name()
                return f"coroutine:{task_name}"
        except RuntimeError:
            # Not in an async context: report the thread
            thread = threading.current_thread()
            return f"thread:{thread.name}"

        return "unknown"

    @staticmethod
    def _log(msg: Any, level: str = "INFO"):
        trace_id = SYLogger.get_trace_id()

        if isinstance(msg, (dict, list)):
            msg_str = json.dumps(msg, ensure_ascii=False)
        else:
            msg_str = str(msg)

        # Execution context, formatted as a string
        thread_info = SYLogger._get_execution_context()

        # Build the log payload; the thread/coroutine info goes into threadName
        request_log = {
            "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
            "message": msg_str,
            "level": level,
            "threadName": thread_info
        }

        # Dispatch on level
        if level == "ERROR":
            logger.error(json.dumps(request_log, ensure_ascii=False))
        elif level == "WARNING":
            logger.warning(json.dumps(request_log, ensure_ascii=False))
        else:
            logger.info(json.dumps(request_log, ensure_ascii=False))

    @staticmethod
    def info(msg: Any):
        SYLogger._log(msg, "INFO")

    @staticmethod
    def warning(msg: Any):
        SYLogger._log(msg, "WARNING")

    @staticmethod
    def error(msg: Any):
        SYLogger._log(msg, "ERROR")

    @staticmethod
    def exception(msg: Any):
        """Log an exception, including the full stack trace."""
        trace_id = SYLogger.get_trace_id()

        if isinstance(msg, (dict, list)):
            msg_str = json.dumps(msg, ensure_ascii=False)
        else:
            msg_str = str(msg)

        # Execution context
        thread_info = SYLogger._get_execution_context()

        # Build the payload carrying the exception
        request_log = {
            "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
            "message": msg_str,
            "level": "ERROR",
            "threadName": thread_info
        }

        # Loguru attaches the active exception's stack trace
        logger.opt(exception=True).error(
            json.dumps(request_log, ensure_ascii=False))
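
For orientation, a minimal usage sketch of the module above. Assumptions: a running Nacos with a reachable Kafka, and a config dict shaped like the lookups in setup_logger expect; the keys and values shown are illustrative, not documented by the package.

from sycommon.logging.kafka_log import KafkaLogger, SYLogger

config = {"Nacos": {"namespaceId": "dev"}}   # assumed shape, mirroring the env lookup above
KafkaLogger.setup_logger(config)             # wires Loguru -> queue -> background Kafka sender

token = SYLogger.set_trace_id("1234567890")  # bind a trace id to the current context
SYLogger.info({"event": "user_login", "user": "alice"})  # dict payloads are JSON-encoded
try:
    1 / 0
except ZeroDivisionError:
    SYLogger.exception("division failed")    # ERROR entry carrying the full stack trace
finally:
    SYLogger.reset_trace_id(token)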

sycommon/logging/logger_wrapper.py

@@ -0,0 +1,19 @@
from sycommon.logging.kafka_log import SYLogger


class LoggerWrapper:
    def __init__(self):
        self.logger = SYLogger

    def info(self, msg):
        self.logger.info(msg)

    def error(self, msg):
        self.logger.error(msg)

    def warning(self, msg):
        self.logger.warning(msg)


def get_logger_wrapper():
    return LoggerWrapper()
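
The wrapper is a thin pass-through to SYLogger, so usage is just:

from sycommon.logging.logger_wrapper import get_logger_wrapper

log = get_logger_wrapper()
log.info("service started")   # delegates to SYLogger.info
log.warning("cache miss")     # delegates to SYLogger.warning
log.error("upstream failed")  # delegates to SYLogger.error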
File without changes

sycommon/middleware/cors.py

@@ -0,0 +1,14 @@
from fastapi.middleware.cors import CORSMiddleware


def setup_cors_handler(app):
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
        expose_headers=["*"],
    )

    return app
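
A sketch of wiring the middleware into an application (FastAPI assumed, since the middleware class comes from fastapi.middleware.cors):

from fastapi import FastAPI
from sycommon.middleware.cors import setup_cors_handler

app = FastAPI()
app = setup_cors_handler(app)  # permissive CORS: every origin, method, and header

Note that allow_origins=["*"] combined with allow_credentials=True is as permissive as CORS gets; services exposed beyond an internal network would normally pin an explicit origin list.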

sycommon/middleware/exception.py

@@ -0,0 +1,85 @@
from fastapi import Request, HTTPException
from fastapi.responses import JSONResponse
from pydantic import ValidationError
import traceback


def setup_exception_handler(app, config: dict):
    # Record the upload size limit (MaxBytes). Note that FastAPI does not
    # enforce MAX_CONTENT_LENGTH itself; this only stores the value on the app.
    app.config = {'MAX_CONTENT_LENGTH': config.get('MaxBytes', 209715200)}

    # 1. File size exceeded
    @app.exception_handler(413)
    async def request_entity_too_large(request: Request, exc):
        MaxBytes = config.get('MaxBytes', 209715200)
        int_MaxBytes = int(MaxBytes) / 1024 / 1024
        return JSONResponse(
            content={
                'code': 413, 'error': f'File size exceeds the allowed limit of {int_MaxBytes}MB.'},
            status_code=413
        )

    # 2. HTTP exceptions
    @app.exception_handler(HTTPException)
    async def http_exception_handler(request: Request, exc: HTTPException):
        return JSONResponse(
            status_code=exc.status_code,
            content={
                "code": exc.status_code,
                "message": exc.detail,
                "path": str(request.url.path)
            }
        )

    # 3. Pydantic validation errors
    @app.exception_handler(ValidationError)
    async def validation_exception_handler(request: Request, exc: ValidationError):
        return JSONResponse(
            status_code=400,
            content={
                "code": 400,
                "message": "Parameter validation failed",
                "details": exc.errors()
            }
        )

    # 4. Custom business exception
    class BusinessException(Exception):
        def __init__(self, code: int, message: str):
            self.code = code
            self.message = message

    @app.exception_handler(BusinessException)
    async def business_exception_handler(request: Request, exc: BusinessException):
        return JSONResponse(
            status_code=exc.code,
            content={
                "code": exc.code,
                "message": exc.message
            }
        )

    # 5. Global handler catching anything left unhandled
    @app.exception_handler(Exception)
    async def global_exception_handler(request: Request, exc: Exception):
        # Capture the full error details
        error_msg = f"Request path: {request.url}\n"
        error_msg += f"Error type: {type(exc).__name__}\n"
        error_msg += f"Error message: {str(exc)}\n"
        error_msg += f"Stack trace: {traceback.format_exc()}"

        # Record it through the logging service
        from sycommon.logging.kafka_log import SYLogger
        SYLogger.error(error_msg)

        # Return a uniform error response (production can omit the details)
        return JSONResponse(
            status_code=500,
            content={
                "code": 500,
                "message": "Internal server error, please retry later",
                "detail": str(exc) if config.get('DEBUG', False) else "Internal Server Error"
            }
        )

    return app
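
A sketch of registering the handlers (the config keys mirror the lookups above; the values are illustrative):

from fastapi import FastAPI, HTTPException
from sycommon.middleware.exception import setup_exception_handler

app = FastAPI()
app = setup_exception_handler(app, {"MaxBytes": 209715200, "DEBUG": False})

@app.get("/boom")
async def boom():
    # Handled by http_exception_handler -> {"code": 404, "message": "not found", ...}
    raise HTTPException(status_code=404, detail="not found")

One consequence of the layout above: BusinessException is defined inside setup_exception_handler, so route code cannot import or raise it; lifting it to module level would make the fourth handler reachable from application code.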