sycommon-python-lib 0.1.16__py3-none-any.whl → 0.1.56b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. sycommon/config/Config.py +6 -2
  2. sycommon/config/RerankerConfig.py +1 -0
  3. sycommon/database/async_base_db_service.py +36 -0
  4. sycommon/database/async_database_service.py +96 -0
  5. sycommon/database/database_service.py +6 -1
  6. sycommon/health/metrics.py +13 -0
  7. sycommon/llm/__init__.py +0 -0
  8. sycommon/llm/embedding.py +149 -0
  9. sycommon/llm/get_llm.py +177 -0
  10. sycommon/llm/llm_logger.py +126 -0
  11. sycommon/logging/async_sql_logger.py +65 -0
  12. sycommon/logging/kafka_log.py +36 -14
  13. sycommon/logging/logger_levels.py +23 -0
  14. sycommon/logging/sql_logger.py +53 -0
  15. sycommon/middleware/context.py +2 -0
  16. sycommon/middleware/middleware.py +4 -0
  17. sycommon/middleware/traceid.py +155 -32
  18. sycommon/models/mqlistener_config.py +1 -0
  19. sycommon/rabbitmq/rabbitmq_client.py +377 -821
  20. sycommon/rabbitmq/rabbitmq_pool.py +338 -0
  21. sycommon/rabbitmq/rabbitmq_service.py +411 -229
  22. sycommon/services.py +116 -61
  23. sycommon/synacos/example.py +153 -0
  24. sycommon/synacos/example2.py +129 -0
  25. sycommon/synacos/feign.py +90 -413
  26. sycommon/synacos/feign_client.py +335 -0
  27. sycommon/synacos/nacos_service.py +159 -106
  28. sycommon/synacos/param.py +75 -0
  29. sycommon/tools/merge_headers.py +97 -0
  30. sycommon/tools/snowflake.py +296 -7
  31. {sycommon_python_lib-0.1.16.dist-info → sycommon_python_lib-0.1.56b1.dist-info}/METADATA +19 -13
  32. sycommon_python_lib-0.1.56b1.dist-info/RECORD +68 -0
  33. sycommon_python_lib-0.1.16.dist-info/RECORD +0 -52
  34. {sycommon_python_lib-0.1.16.dist-info → sycommon_python_lib-0.1.56b1.dist-info}/WHEEL +0 -0
  35. {sycommon_python_lib-0.1.16.dist-info → sycommon_python_lib-0.1.56b1.dist-info}/entry_points.txt +0 -0
  36. {sycommon_python_lib-0.1.16.dist-info → sycommon_python_lib-0.1.56b1.dist-info}/top_level.txt +0 -0

sycommon/logging/async_sql_logger.py
@@ -0,0 +1,65 @@
+ from sqlalchemy import event
+ from sqlalchemy.ext.asyncio import AsyncEngine
+ from sycommon.logging.kafka_log import SYLogger
+ import time
+ from datetime import datetime
+ from decimal import Decimal
+
+
+ class AsyncSQLTraceLogger:
+     @staticmethod
+     def setup_sql_logging(engine):
+         """
+         Register event listeners on a SQLAlchemy async engine.
+         Note: listeners must be attached to engine.sync_engine, not to the AsyncEngine directly.
+         """
+         def serialize_params(params):
+             """Serialize parameter values of special types"""
+             if isinstance(params, (list, tuple)):
+                 return [serialize_params(p) for p in params]
+             elif isinstance(params, dict):
+                 return {k: serialize_params(v) for k, v in params.items()}
+             elif isinstance(params, datetime):
+                 return params.isoformat()
+             elif isinstance(params, Decimal):
+                 return float(params)
+             else:
+                 return params
+
+         # ========== Core change ==========
+         # Listeners must be registered on the underlying sync engine obtained via engine.sync_engine
+         target = engine.sync_engine
+
+         @event.listens_for(target, "after_cursor_execute")
+         def after_cursor_execute(
+             conn, cursor, statement, parameters, context, executemany
+         ):
+             try:
+                 # Read the start time from the connection options
+                 # conn here is the synchronous connection object
+                 start_time = conn.info.get('_start_time') or \
+                     conn._execution_options.get("_start_time", time.time())
+
+                 execution_time = (time.time() - start_time) * 1000
+
+                 sql_log = {
+                     "type": "SQL",
+                     "statement": statement,
+                     "parameters": serialize_params(parameters),
+                     "execution_time_ms": round(execution_time, 2),
+                 }
+
+                 # Note: SYLogger.info must be thread-safe or non-blocking, otherwise async performance may suffer
+                 SYLogger.info(f"SQL执行: {sql_log}")
+             except Exception as e:
+                 SYLogger.error(f"SQL日志处理失败: {str(e)}")
+
+         @event.listens_for(target, "before_cursor_execute")
+         def before_cursor_execute(
+             conn, cursor, statement, parameters, context, executemany
+         ):
+             try:
+                 # Record the start time in execution_options
+                 conn = conn.execution_options(_start_time=time.time())
+             except Exception as e:
+                 SYLogger.error(f"SQL开始时间记录失败: {str(e)}")
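
A minimal usage sketch for the new async SQL trace logger, assuming a typical asyncpg-backed engine (the connection string is a placeholder, not taken from the package):

    from sqlalchemy.ext.asyncio import create_async_engine
    from sycommon.logging.async_sql_logger import AsyncSQLTraceLogger

    # Create the async engine as usual; the logger attaches its listeners to engine.sync_engine internally
    engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/appdb")
    AsyncSQLTraceLogger.setup_sql_logging(engine)

Every statement executed on this engine is then logged through SYLogger with its parameters and execution time in milliseconds.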

sycommon/logging/kafka_log.py
@@ -15,7 +15,7 @@ from kafka import KafkaProducer
  from loguru import logger
  import loguru
  from sycommon.config.Config import Config, SingletonMeta
- from sycommon.middleware.context import current_trace_id
+ from sycommon.middleware.context import current_trace_id, current_headers
  from sycommon.tools.snowflake import Snowflake

  # Configure Loguru's color scheme
@@ -114,7 +114,7 @@ class KafkaLogger(metaclass=SingletonMeta):
          trace_id = None

          if not trace_id:
-             trace_id = SYLogger.get_trace_id() or Snowflake.next_id()
+             trace_id = SYLogger.get_trace_id() or Snowflake.id

          # Get thread/coroutine info
          thread_info = SYLogger._get_execution_context()
@@ -173,7 +173,7 @@ class KafkaLogger(metaclass=SingletonMeta):
              "className": "",
              "sqlCost": 0,
              "size": len(str(message)),
-             "uid": int(trace_id) if trace_id and trace_id.isdigit() else 0
+             "uid": int(Snowflake.id)  # a standalone new id
          }

          # Smart queue management
@@ -212,7 +212,7 @@ class KafkaLogger(metaclass=SingletonMeta):
              return

          # Get the current trace_id
-         trace_id = SYLogger.get_trace_id() or Snowflake.next_id()
+         trace_id = SYLogger.get_trace_id() or Snowflake.id

          # Build the error log
          error_log = {
@@ -441,6 +441,18 @@ class SYLogger:
          """Reset the current trace_id"""
          current_trace_id.reset(token)

+     @staticmethod
+     def get_headers():
+         return current_headers.get()
+
+     @staticmethod
+     def set_headers(headers: list[tuple[str, str]]):
+         return current_headers.set(headers)
+
+     @staticmethod
+     def reset_headers(token):
+         current_headers.reset(token)
+
      @staticmethod
      def _get_execution_context() -> str:
          """Return formatted thread or coroutine info for the current execution context"""
@@ -459,7 +471,7 @@ class SYLogger:

      @staticmethod
      def _log(msg: any, level: str = "INFO"):
-         trace_id = SYLogger.get_trace_id()
+         trace_id = SYLogger.get_trace_id() or Snowflake.id

          if isinstance(msg, dict) or isinstance(msg, list):
              msg_str = json.dumps(msg, ensure_ascii=False)
@@ -470,12 +482,22 @@ class SYLogger:
          thread_info = SYLogger._get_execution_context()

          # Build the log structure; add thread/coroutine info to the threadName field
-         request_log = {
-             "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
-             "message": msg_str,
-             "level": level,
-             "threadName": thread_info
-         }
+         request_log = {}
+         if level == "ERROR":
+             request_log = {
+                 "trace_id": str(trace_id) if trace_id else Snowflake.id,
+                 "message": msg_str,
+                 "traceback": traceback.format_exc(),
+                 "level": level,
+                 "threadName": thread_info
+             }
+         else:
+             request_log = {
+                 "trace_id": str(trace_id) if trace_id else Snowflake.id,
+                 "message": msg_str,
+                 "level": level,
+                 "threadName": thread_info
+             }

          # Choose the log level
          _log = ''
@@ -490,7 +512,7 @@ class SYLogger:
          logger.info(_log)

          if os.getenv('DEV-LOG', 'false').lower() == 'true':
-             pprint(_log)
+             pprint.pprint(_log)

      @staticmethod
      def info(msg: any, *args, **kwargs):
@@ -511,7 +533,7 @@ class SYLogger:
      @staticmethod
      def exception(msg: any, *args, **kwargs):
          """Log exception info, including the full stack trace"""
-         trace_id = SYLogger.get_trace_id()
+         trace_id = SYLogger.get_trace_id() or Snowflake.id

          if isinstance(msg, dict) or isinstance(msg, list):
              msg_str = json.dumps(msg, ensure_ascii=False)
@@ -523,7 +545,7 @@ class SYLogger:

          # Build a log entry that includes the exception stack trace
          request_log = {
-             "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
+             "trace_id": str(trace_id) if trace_id else Snowflake.id,
              "message": msg_str,
              "level": "ERROR",
              "threadName": thread_info

sycommon/logging/logger_levels.py
@@ -0,0 +1,23 @@
+ import logging
+
+
+ def setup_logger_levels():
+     """Configure per-module log levels and suppress irrelevant INFO/DEBUG output"""
+     # Nacos client: WARNING and above only (hides INFO-level heartbeat/registration logs)
+     logging.getLogger("nacos.client").setLevel(logging.WARNING)
+
+     # Kafka Python client: hide INFO-level connection/version-probe logs
+     logging.getLogger("kafka.conn").setLevel(logging.WARNING)
+     logging.getLogger("kafka.producer").setLevel(logging.WARNING)
+
+     # Uvicorn/FastAPI: hide startup/app-init INFO logs (keep ERROR/WARNING)
+     # logging.getLogger("uvicorn").setLevel(logging.WARNING)
+     # logging.getLogger("uvicorn.access").setLevel(logging.WARNING)  # hide access logs
+     # logging.getLogger("uvicorn.error").setLevel(logging.ERROR)  # keep errors only
+
+     # Custom root logger (e.g. sync database/listener init): hide INFO
+     logging.getLogger("root").setLevel(logging.WARNING)
+
+     # RabbitMQ-related loggers (if a dedicated logger exists)
+     logging.getLogger("pika").setLevel(logging.WARNING)  # when the pika client is used
+     logging.getLogger("rabbitmq").setLevel(logging.WARNING)
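
A minimal sketch of how this helper is meant to be used, assuming it is called once during application startup before the noisy clients begin logging:

    from sycommon.logging.logger_levels import setup_logger_levels

    # Run early in service bootstrap so Nacos/Kafka/pika INFO noise never reaches the handlers
    setup_logger_levels()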

sycommon/logging/sql_logger.py
@@ -0,0 +1,53 @@
+ from sqlalchemy import event
+ from sqlalchemy.engine import Engine
+ from sycommon.logging.kafka_log import SYLogger
+ import time
+ from datetime import datetime
+ from decimal import Decimal
+
+
+ class SQLTraceLogger:
+     @staticmethod
+     def setup_sql_logging(engine: Engine):
+         """Register event listeners for a SQLAlchemy engine"""
+         def serialize_params(params):
+             """Serialize parameter values of special types"""
+             if isinstance(params, (list, tuple)):
+                 return [serialize_params(p) for p in params]
+             elif isinstance(params, dict):
+                 return {k: serialize_params(v) for k, v in params.items()}
+             elif isinstance(params, datetime):
+                 return params.isoformat()
+             elif isinstance(params, Decimal):
+                 return float(params)
+             else:
+                 return params
+
+         @event.listens_for(Engine, "after_cursor_execute")
+         def after_cursor_execute(
+             conn, cursor, statement, parameters, context, executemany
+         ):
+             try:
+                 start_time = conn._execution_options.get(
+                     "_start_time", time.time())
+                 execution_time = (time.time() - start_time) * 1000
+
+                 sql_log = {
+                     "type": "SQL",
+                     "statement": statement,
+                     "parameters": serialize_params(parameters),
+                     "execution_time_ms": round(execution_time, 2),
+                 }
+
+                 SYLogger.info(f"SQL执行: {sql_log}")
+             except Exception as e:
+                 SYLogger.error(f"SQL日志处理失败: {str(e)}")
+
+         @event.listens_for(Engine, "before_cursor_execute")
+         def before_cursor_execute(
+             conn, cursor, statement, parameters, context, executemany
+         ):
+             try:
+                 conn = conn.execution_options(_start_time=time.time())
+             except Exception as e:
+                 SYLogger.error(f"SQL开始时间记录失败: {str(e)}")
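
A minimal usage sketch for the synchronous variant; the SQLite URL is a placeholder. Note that these listeners are registered on the Engine class rather than on the passed instance, so once set up they fire for every engine in the process:

    from sqlalchemy import create_engine
    from sycommon.logging.sql_logger import SQLTraceLogger

    engine = create_engine("sqlite:///:memory:")
    SQLTraceLogger.setup_sql_logging(engine)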

sycommon/middleware/context.py
@@ -1,3 +1,5 @@
  import contextvars

  current_trace_id = contextvars.ContextVar("trace_id", default=None)
+
+ current_headers = contextvars.ContextVar("headers", default=None)
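
Together with the new SYLogger.set_headers/get_headers/reset_headers helpers in kafka_log.py, these context variables carry the request's trace id and raw headers across async calls. A small illustrative sketch (the handler and values are examples, not code from the package):

    from sycommon.logging.kafka_log import SYLogger

    async def handle(request):
        token = SYLogger.set_trace_id("1234567890")            # normally a snowflake id
        header_token = SYLogger.set_headers(request.headers.raw)
        try:
            SYLogger.info("calling downstream")                # logs see the same trace_id
            raw_headers = SYLogger.get_headers()               # list of (name, value) tuples
        finally:
            SYLogger.reset_trace_id(token)
            SYLogger.reset_headers(header_token)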

sycommon/middleware/middleware.py
@@ -1,3 +1,4 @@
+ from sycommon.health.metrics import setup_metrics_handler
  from sycommon.health.ping import setup_ping_handler
  from sycommon.middleware.cors import setup_cors_handler
  from sycommon.middleware.docs import setup_docs_handler
@@ -34,6 +35,9 @@ class Middleware:
          # ping
          app = setup_ping_handler(app)

+         # metrics
+         app = setup_metrics_handler(app)
+
          # Add MQ middleware
          # app = setup_mq_middleware(app)


sycommon/middleware/traceid.py
@@ -3,33 +3,53 @@ import re
  from typing import Dict, Any
  from fastapi import Request, Response
  from sycommon.logging.kafka_log import SYLogger
+ from sycommon.tools.merge_headers import merge_headers
  from sycommon.tools.snowflake import Snowflake


  def setup_trace_id_handler(app):
      @app.middleware("http")
      async def trace_id_and_log_middleware(request: Request, call_next):
-         # Generate or read the traceId
-         trace_id = request.headers.get("x-traceId-header")
+         # ========== 1. Request phase: read or generate x-traceId-header ==========
+         # Prefer the request header (any letter case accepted)
+         trace_id = request.headers.get(
+             "x-traceId-header") or request.headers.get("x-traceid-header")
+         # Generate a snowflake id if absent
          if not trace_id:
-             trace_id = Snowflake.next_id()
+             trace_id = Snowflake.id

-         # Set the trace_id context
+         # Store trace_id in the logging context
          token = SYLogger.set_trace_id(trace_id)
+         header_token = SYLogger.set_headers(request.headers.raw)

          # Collect request parameters
          query_params = dict(request.query_params)
          request_body: Dict[str, Any] = {}
          files_info: Dict[str, str] = {}

-         # Detect the request content type
+         json_content_types = [
+             "application/json",
+             "text/plain;charset=utf-8",
+             "text/plain"
+         ]
          content_type = request.headers.get("content-type", "").lower()
+         is_json_content = any(ct in content_type for ct in json_content_types)

-         if "application/json" in content_type and request.method in ["POST", "PUT", "PATCH"]:
+         if is_json_content and request.method in ["POST", "PUT", "PATCH"]:
              try:
-                 request_body = await request.json()
+                 # Accept JSON sent as plain text (read the text first, then parse)
+                 if "text/plain" in content_type:
+                     raw_text = await request.text(encoding="utf-8")
+                     request_body = json.loads(raw_text)
+                 else:
+                     # application/json is parsed directly
+                     request_body = await request.json()
              except Exception as e:
-                 request_body = {"error": f"Failed to parse JSON: {str(e)}"}
+                 try:
+                     request_body = await request.json()
+                 except Exception as e:
+                     # Catch the JSON parse error specifically (not a blanket Exception)
+                     request_body = {"error": f"JSON parse failed: {str(e)}"}

          elif "multipart/form-data" in content_type and request.method in ["POST", "PUT"]:
              try:
@@ -62,8 +82,9 @@ def setup_trace_id_handler(app):
                  request_body = {
                      "error": f"Failed to process form data: {str(e)}"}

-         # Build the request log message
+         # Build the request log (including traceId)
          request_message = {
+             "traceId": trace_id,  # add traceId to the request log
              "method": request.method,
              "url": str(request.url),
              "query_params": query_params,
@@ -77,68 +98,159 @@ def setup_trace_id_handler(app):
              # Process the request
              response = await call_next(request)

-             content_type = response.headers.get("Content-Type", "")
+             # Get the response Content-Type (normalized to lower case)
+             content_type = response.headers.get("content-type", "").lower()

-             # Handle SSE responses - key fix
+             # ========== 2. SSE responses: only set x-traceId-header, do not modify other headers ==========
              if "text/event-stream" in content_type:
-                 # Streaming responses must not carry Content-Length; remove it
-                 if "Content-Length" in response.headers:
-                     del response.headers["Content-Length"]
-                 response.headers["x-traceId-header"] = trace_id
+                 try:
+                     # Force-write x-traceId-header into the response headers
+                     response.headers["x-traceId-header"] = trace_id
+                     # Make sure the frontend can read it (only extend the expose header, never overwrite it)
+                     expose_headers = response.headers.get(
+                         "access-control-expose-headers", "")
+                     if expose_headers:
+                         if "x-traceId-header" not in expose_headers.lower():
+                             response.headers[
+                                 "access-control-expose-headers"] = f"{expose_headers}, x-traceId-header"
+                     else:
+                         response.headers["access-control-expose-headers"] = "x-traceId-header"
+                     # SSE must drop Content-Length (the only extra operation here)
+                     headers_lower = {
+                         k.lower(): k for k in response.headers.keys()}
+                     if "content-length" in headers_lower:
+                         del response.headers[headers_lower["content-length"]]
+                 except AttributeError:
+                     # Streaming response headers are read-only: re-initialize with only traceId and the required expose header
+                     new_headers = dict(response.headers) if hasattr(
+                         response.headers, 'items') else {}
+                     new_headers["x-traceId-header"] = trace_id  # force-add
+                     # Keep the existing expose header and append traceId
+                     if "access-control-expose-headers" in new_headers:
+                         if "x-traceId-header" not in new_headers["access-control-expose-headers"].lower():
+                             new_headers["access-control-expose-headers"] += ", x-traceId-header"
+                     else:
+                         new_headers["access-control-expose-headers"] = "x-traceId-header"
+                     # Remove Content-Length
+                     new_headers.pop("content-length", None)
+                     response.init_headers(new_headers)
                  return response

-             # Handle regular responses
+             # ========== 3. Non-SSE responses: force-write x-traceId-header, preserve CORS ==========
+             # Back up the CORS headers (so they are not lost)
+             cors_headers = {}
+             cors_header_keys = [
+                 "access-control-allow-origin",
+                 "access-control-allow-methods",
+                 "access-control-allow-headers",
+                 "access-control-expose-headers",
+                 "access-control-allow-credentials",
+                 "access-control-max-age"
+             ]
+             for key in cors_header_keys:
+                 for k in response.headers.keys():
+                     if k.lower() == key:
+                         cors_headers[key] = response.headers[k]
+                         break
+
+             # Merge headers (non-SSE case)
+             merged_headers = merge_headers(
+                 source_headers=request.headers,
+                 target_headers=response.headers,
+                 keep_keys=None,
+                 delete_keys={'content-length', 'accept', 'content-type'}
+             )
+
+             # Force-add x-traceId-header (highest priority)
+             merged_headers["x-traceId-header"] = trace_id
+             # Restore the CORS headers and append traceId to the expose header
+             merged_headers.update(cors_headers)
+             expose_headers = merged_headers.get(
+                 "access-control-expose-headers", "")
+             if expose_headers:
+                 if "x-traceId-header" not in expose_headers.lower():
+                     merged_headers["access-control-expose-headers"] = f"{expose_headers}, x-traceId-header"
+             else:
+                 merged_headers["access-control-expose-headers"] = "x-traceId-header"
+
+             # Update the response headers
+             if hasattr(response.headers, 'clear'):
+                 response.headers.clear()
+                 for k, v in merged_headers.items():
+                     response.headers[k] = v
+             elif hasattr(response, "init_headers"):
+                 response.init_headers(merged_headers)
+             else:
+                 for k, v in merged_headers.items():
+                     try:
+                         response.headers[k] = v
+                     except (AttributeError, KeyError):
+                         pass
+
+             # Handle the regular response body (add traceId to JSON)
              response_body = b""
             try:
-                 # Collect all response chunks
                  async for chunk in response.body_iterator:
                      response_body += chunk

+                 # Get Content-Disposition (lower-cased)
                  content_disposition = response.headers.get(
-                     "Content-Disposition", "")
+                     "content-disposition", "").lower()

-                 # Decide whether trace_id can be added
+                 # Add traceId to JSON response bodies
                  if "application/json" in content_type and not content_disposition.startswith("attachment"):
                      try:
                          data = json.loads(response_body)
-                         data["traceId"] = trace_id
-                         new_body = json.dumps(
-                             data, ensure_ascii=False).encode()
+                         new_body = response_body
+                         if data:
+                             data["traceId"] = trace_id  # add it to the body as well
+                             new_body = json.dumps(
+                                 data, ensure_ascii=False).encode()

-                         # Create a new response with a correct Content-Length
+                         # Rebuild the response so the headers include x-traceId-header
                          response = Response(
                              content=new_body,
                              status_code=response.status_code,
                              headers=dict(response.headers),
                              media_type=response.media_type
                          )
-                         # Explicitly set the correct Content-Length
-                         response.headers["Content-Length"] = str(len(new_body))
+                         response.headers["content-length"] = str(len(new_body))
+                         response.headers["x-traceId-header"] = trace_id  # set again as a fallback
+                         # Restore the CORS headers
+                         for k, v in cors_headers.items():
+                             response.headers[k] = v
                      except json.JSONDecodeError:
-                         # Not valid JSON: restore the original body and update the length
+                         # JSON response: only update the length and force-add traceId
                          response = Response(
                              content=response_body,
                              status_code=response.status_code,
                              headers=dict(response.headers),
                              media_type=response.media_type
                          )
-                         response.headers["Content-Length"] = str(
+                         response.headers["content-length"] = str(
                              len(response_body))
+                         response.headers["x-traceId-header"] = trace_id  # force-add
+                         for k, v in cors_headers.items():
+                             response.headers[k] = v
                  else:
-                     # Non-JSON response: restore the original body
+                     # Non-JSON response: force-add traceId
                      response = Response(
                          content=response_body,
                          status_code=response.status_code,
                          headers=dict(response.headers),
                          media_type=response.media_type
                      )
-                     response.headers["Content-Length"] = str(
+                     response.headers["content-length"] = str(
                          len(response_body))
+                     response.headers["x-traceId-header"] = trace_id  # force-add
+                     for k, v in cors_headers.items():
+                         response.headers[k] = v
              except StopAsyncIteration:
                  pass

-             # Build the response log message
+             # Build the response log (including traceId)
              response_message = {
+                 "traceId": trace_id,  # add traceId to the response log
                  "status_code": response.status_code,
                  "response_body": response_body.decode('utf-8', errors='ignore'),
              }
@@ -146,11 +258,21 @@ def setup_trace_id_handler(app):
                  response_message, ensure_ascii=False)
              SYLogger.info(response_message_str)

-             response.headers["x-traceId-header"] = trace_id
+             # ========== Final fallback: make sure the response always carries x-traceId-header ==========
+             try:
+                 response.headers["x-traceId-header"] = trace_id
+             except AttributeError:
+                 new_headers = dict(response.headers) if hasattr(
+                     response.headers, 'items') else {}
+                 new_headers["x-traceId-header"] = trace_id
+                 if hasattr(response, "init_headers"):
+                     response.init_headers(new_headers)

              return response
          except Exception as e:
+             # Add traceId to exception logs as well
              error_message = {
+                 "traceId": trace_id,
                  "error": str(e),
                  "query_params": query_params,
                  "request_body": request_body,
@@ -160,7 +282,8 @@ def setup_trace_id_handler(app):
              SYLogger.error(error_message_str)
              raise
          finally:
-             # Clean up context variables to prevent leaks
+             # Clean up the context variables
              SYLogger.reset_trace_id(token)
+             SYLogger.reset_headers(header_token)

      return app
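
The net effect of the traceid.py changes is that every response, SSE or not, carries the request's trace id back to the caller and exposes it for browser clients. A minimal illustrative round trip (the URL and endpoint are examples, not part of the package):

    import httpx

    # If the caller supplies x-traceId-header it is echoed back;
    # otherwise the middleware generates a snowflake id and returns it.
    resp = httpx.get("http://localhost:8000/ping",
                     headers={"x-traceId-header": "1234567890"})
    assert resp.headers["x-traceId-header"] == "1234567890"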

sycommon/models/mqlistener_config.py
@@ -31,6 +31,7 @@ class RabbitMQListenerConfig(BaseModel):
      durable: bool = Field(True, description="是否持久化")
      auto_delete: bool = Field(False, description="是否自动删除队列")
      auto_parse_json: bool = Field(True, description="是否自动解析JSON消息")
+     prefetch_count: int = Field(2, description="mq同时消费数量")

      class Config:
          """Model configuration"""