sycommon-python-lib 0.1.53__py3-none-any.whl → 0.1.55__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
1
+ from contextlib import asynccontextmanager
2
+ from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
3
+ from sycommon.config.Config import SingletonMeta
4
+ from sycommon.database.async_database_service import AsyncDatabaseService
5
+ from sycommon.logging.kafka_log import SYLogger
6
+
7
+
8
class AsyncBaseDBService(metaclass=SingletonMeta):
    """Base service for database access that wraps async session handling.

    Singleton (via SingletonMeta): every consumer shares one engine and
    one session factory.
    """

    def __init__(self):
        # NOTE(review): assumes AsyncDatabaseService.setup_database() has
        # already run, so engine() returns a ready AsyncEngine — confirm.
        self.engine = AsyncDatabaseService.engine()

        # class_=AsyncSession makes the factory produce async sessions;
        # expire_on_commit=False keeps ORM objects usable after commit.
        self.Session = async_sessionmaker(
            bind=self.engine,
            class_=AsyncSession,
            expire_on_commit=False,
        )

    @asynccontextmanager
    async def session(self):
        """Yield an AsyncSession; commit on success, rollback and re-raise
        on any error. The ``async with`` block also closes the session."""
        async with self.Session() as db:
            try:
                yield db
                await db.commit()
            except Exception as e:
                await db.rollback()
                SYLogger.error(f"Database operation failed: {str(e)}")
                raise
@@ -0,0 +1,96 @@
1
+ from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
2
+ from sqlalchemy import text
3
+
4
+ from sycommon.config.Config import SingletonMeta
5
+ from sycommon.config.DatabaseConfig import DatabaseConfig, convert_dict_keys
6
+ from sycommon.logging.kafka_log import SYLogger
7
+ from sycommon.logging.async_sql_logger import AsyncSQLTraceLogger
8
+ from sycommon.synacos.nacos_service import NacosService
9
+
10
+
11
class AsyncDatabaseService(metaclass=SingletonMeta):
    """Holds the process-wide async SQLAlchemy engine."""

    _engine = None

    @staticmethod
    async def setup_database(config: dict, shareConfigKey: str):
        """Build the shared async engine from the Nacos shared config.

        Reads spring.datasource out of the share-config identified by
        *shareConfigKey*; a missing/empty section is a silent no-op.
        Raises when the post-setup connectivity probe fails.
        """
        shared = NacosService(config).share_configs.get(shareConfigKey, {})
        datasource = shared.get('spring', {}).get('datasource', None) if shared else None
        if datasource:
            converted = convert_dict_keys(datasource)
            db_config = DatabaseConfig.model_validate(converted)

            # The connector builds (and owns) the engine.
            connector = AsyncDatabaseConnector(db_config)
            AsyncDatabaseService._engine = connector.engine

            # Fail fast if the database is unreachable.
            if not await connector.test_connection():
                raise Exception("Database connection test failed")

    @staticmethod
    def engine():
        """Return the shared AsyncEngine (None before setup_database)."""
        return AsyncDatabaseService._engine
35
+
36
+
37
class AsyncDatabaseConnector(metaclass=SingletonMeta):
    """Parses a JDBC-style MySQL URL out of DatabaseConfig and builds an
    async SQLAlchemy engine using the aiomysql driver."""

    def __init__(self, db_config: DatabaseConfig):
        # Local import keeps the module-level import block untouched.
        from urllib.parse import quote_plus

        self.db_user = db_config.username
        self.db_password = db_config.password

        # Split "scheme://host[:port]/dbname?key=value&..." into pieces.
        url_parts = db_config.url.split('//')[1].split('/')
        host_port = url_parts[0].split(':')
        self.db_host = host_port[0]
        # Default to MySQL's standard port when the URL omits ":port"
        # (indexing host_port[1] unconditionally raised IndexError).
        self.db_port = host_port[1] if len(host_port) > 1 else '3306'
        self.db_name = url_parts[1].split('?')[0]

        # Parse query-string parameters, if any.
        name_and_query = url_parts[1].split('?')
        params_str = name_and_query[1] if len(name_and_query) > 1 else ''
        params = {}
        for param in params_str.split('&'):
            if param:
                # maxsplit=1 so '=' characters inside a value stay intact.
                key, value = param.split('=', 1)
                params[key] = value

        # Strip JDBC-only options that the aiomysql driver does not accept.
        for key in ['useUnicode', 'characterEncoding', 'serverTimezone', 'zeroDateTimeBehavior']:
            params.pop(key, None)

        # Build the async connection URL. Credentials are URL-quoted so
        # special characters (@ : / %) in user/password cannot corrupt it.
        self.db_url = (
            f'mysql+aiomysql://{quote_plus(self.db_user)}:{quote_plus(self.db_password)}'
            f'@{self.db_host}:{self.db_port}/{self.db_name}'
        )

        # Log with the password masked — never write credentials to logs.
        masked_url = (
            f'mysql+aiomysql://{quote_plus(self.db_user)}:***'
            f'@{self.db_host}:{self.db_port}/{self.db_name}'
        )
        SYLogger.info(f"Database URL: {masked_url}")

        # Connection-pool tuning: pool_pre_ping validates connections on
        # checkout; pool_recycle avoids MySQL idle-connection timeouts.
        self.engine = create_async_engine(
            self.db_url,
            connect_args=params,
            pool_size=10,       # base pool size
            max_overflow=20,    # extra connections beyond the pool
            pool_timeout=30,    # seconds to wait for a free connection
            pool_recycle=3600,  # recycle connections after an hour
            pool_pre_ping=True,
            echo=False,         # do not echo SQL to stdout
        )

        # Register SQL trace logging (the listener attaches to the
        # underlying sync engine; see AsyncSQLTraceLogger).
        AsyncSQLTraceLogger.setup_sql_logging(self.engine)

    async def test_connection(self):
        """Probe connectivity with a trivial query; return True on success,
        False (with an error log) on any failure."""
        try:
            async with self.engine.connect() as connection:
                await connection.execute(text("SELECT 1"))
            return True
        except Exception as e:
            SYLogger.error(f"Database connection test failed: {e}")
            return False
@@ -0,0 +1,65 @@
1
+ from sqlalchemy import event
2
+ from sqlalchemy.ext.asyncio import AsyncEngine
3
+ from sycommon.logging.kafka_log import SYLogger
4
+ import time
5
+ from datetime import datetime
6
+ from decimal import Decimal
7
+
8
+
9
class AsyncSQLTraceLogger:
    """Registers SQL timing/trace logging hooks on an async engine."""

    @staticmethod
    def setup_sql_logging(engine):
        """
        Attach before/after cursor-execute listeners to *engine*.

        Event listeners cannot be attached to an AsyncEngine directly, so
        they are registered on ``engine.sync_engine`` (per SQLAlchemy's
        asyncio documentation).
        """
        def serialize_params(params):
            """Recursively convert non-JSON-friendly parameter values."""
            if isinstance(params, (list, tuple)):
                return [serialize_params(p) for p in params]
            elif isinstance(params, dict):
                return {k: serialize_params(v) for k, v in params.items()}
            elif isinstance(params, datetime):
                return params.isoformat()
            elif isinstance(params, Decimal):
                return float(params)
            else:
                return params

        # Listeners must target the underlying synchronous engine.
        target = engine.sync_engine

        @event.listens_for(target, "before_cursor_execute")
        def before_cursor_execute(
            conn, cursor, statement, parameters, context, executemany
        ):
            try:
                # conn.info is SQLAlchemy's documented per-connection
                # scratch dict. (The previous code rebound the *local*
                # name `conn` via execution_options(), which relied on
                # private state and never reliably reached the after-hook.)
                conn.info['_start_time'] = time.time()
            except Exception as e:
                SYLogger.error(f"SQL开始时间记录失败: {str(e)}")

        @event.listens_for(target, "after_cursor_execute")
        def after_cursor_execute(
            conn, cursor, statement, parameters, context, executemany
        ):
            try:
                # Fall back to "now" (0 ms) when the before-hook never ran.
                start_time = conn.info.pop('_start_time', None) or time.time()
                execution_time = (time.time() - start_time) * 1000

                sql_log = {
                    "type": "SQL",
                    "statement": statement,
                    "parameters": serialize_params(parameters),
                    "execution_time_ms": round(execution_time, 2),
                }

                # NOTE(review): SYLogger.info should be non-blocking or
                # thread-safe, or this hook will slow query execution.
                SYLogger.info(f"SQL执行: {sql_log}")
            except Exception as e:
                SYLogger.error(f"SQL日志处理失败: {str(e)}")
@@ -15,7 +15,7 @@ from kafka import KafkaProducer
15
15
  from loguru import logger
16
16
  import loguru
17
17
  from sycommon.config.Config import Config, SingletonMeta
18
- from sycommon.middleware.context import current_trace_id
18
+ from sycommon.middleware.context import current_trace_id, current_headers
19
19
  from sycommon.tools.snowflake import Snowflake
20
20
 
21
21
  # 配置Loguru的颜色方案
@@ -114,7 +114,7 @@ class KafkaLogger(metaclass=SingletonMeta):
114
114
  trace_id = None
115
115
 
116
116
  if not trace_id:
117
- trace_id = SYLogger.get_trace_id() or Snowflake.next_id()
117
+ trace_id = SYLogger.get_trace_id() or Snowflake.id
118
118
 
119
119
  # 获取线程/协程信息
120
120
  thread_info = SYLogger._get_execution_context()
@@ -173,7 +173,7 @@ class KafkaLogger(metaclass=SingletonMeta):
173
173
  "className": "",
174
174
  "sqlCost": 0,
175
175
  "size": len(str(message)),
176
- "uid": int(Snowflake.next_id()) # 独立新的id
176
+ "uid": int(Snowflake.id) # 独立新的id
177
177
  }
178
178
 
179
179
  # 智能队列管理
@@ -212,7 +212,7 @@ class KafkaLogger(metaclass=SingletonMeta):
212
212
  return
213
213
 
214
214
  # 获取当前的trace_id
215
- trace_id = SYLogger.get_trace_id() or Snowflake.next_id()
215
+ trace_id = SYLogger.get_trace_id() or Snowflake.id
216
216
 
217
217
  # 构建错误日志
218
218
  error_log = {
@@ -441,6 +441,18 @@ class SYLogger:
441
441
  """重置当前的 trace_id"""
442
442
  current_trace_id.reset(token)
443
443
 
444
+ @staticmethod
445
+ def get_headers():
446
+ return current_headers.get()
447
+
448
+ @staticmethod
449
+ def set_headers(headers: list[tuple[str, str]]):
450
+ return current_headers.set(headers)
451
+
452
+ @staticmethod
453
+ def reset_headers(token):
454
+ current_headers.reset(token)
455
+
444
456
  @staticmethod
445
457
  def _get_execution_context() -> str:
446
458
  """获取当前执行上下文的线程或协程信息,返回格式化字符串"""
@@ -459,7 +471,7 @@ class SYLogger:
459
471
 
460
472
  @staticmethod
461
473
  def _log(msg: any, level: str = "INFO"):
462
- trace_id = SYLogger.get_trace_id()
474
+ trace_id = SYLogger.get_trace_id() or Snowflake.id
463
475
 
464
476
  if isinstance(msg, dict) or isinstance(msg, list):
465
477
  msg_str = json.dumps(msg, ensure_ascii=False)
@@ -473,7 +485,7 @@ class SYLogger:
473
485
  request_log = {}
474
486
  if level == "ERROR":
475
487
  request_log = {
476
- "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
488
+ "trace_id": str(trace_id) if trace_id else Snowflake.id,
477
489
  "message": msg_str,
478
490
  "traceback": traceback.format_exc(),
479
491
  "level": level,
@@ -481,7 +493,7 @@ class SYLogger:
481
493
  }
482
494
  else:
483
495
  request_log = {
484
- "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
496
+ "trace_id": str(trace_id) if trace_id else Snowflake.id,
485
497
  "message": msg_str,
486
498
  "level": level,
487
499
  "threadName": thread_info
@@ -521,7 +533,7 @@ class SYLogger:
521
533
  @staticmethod
522
534
  def exception(msg: any, *args, **kwargs):
523
535
  """记录异常信息,包括完整堆栈"""
524
- trace_id = SYLogger.get_trace_id()
536
+ trace_id = SYLogger.get_trace_id() or Snowflake.id
525
537
 
526
538
  if isinstance(msg, dict) or isinstance(msg, list):
527
539
  msg_str = json.dumps(msg, ensure_ascii=False)
@@ -533,7 +545,7 @@ class SYLogger:
533
545
 
534
546
  # 构建包含异常堆栈的日志
535
547
  request_log = {
536
- "trace_id": str(trace_id) if trace_id else Snowflake.next_id(),
548
+ "trace_id": str(trace_id) if trace_id else Snowflake.id,
537
549
  "message": msg_str,
538
550
  "level": "ERROR",
539
551
  "threadName": thread_info
@@ -0,0 +1,23 @@
1
+ import logging
2
+
3
+
4
def setup_logger_levels():
    """Raise log levels of chatty third-party modules to WARNING.

    Suppresses routine INFO/DEBUG chatter (heartbeats, connection
    handshakes, init banners) while keeping warnings and errors.
    """
    noisy_loggers = (
        "nacos.client",    # Nacos heartbeat/registration INFO
        "kafka.conn",      # kafka-python connection/version probing
        "kafka.producer",  # kafka-python producer INFO
        "root",            # misc INFO from root-level init code
        "pika",            # RabbitMQ client (if pika is used)
        "rabbitmq",        # RabbitMQ-specific logger (if any)
    )
    for name in noisy_loggers:
        logging.getLogger(name).setLevel(logging.WARNING)

    # Uvicorn/FastAPI suppression intentionally left disabled:
    # logging.getLogger("uvicorn").setLevel(logging.WARNING)
    # logging.getLogger("uvicorn.access").setLevel(logging.WARNING)  # access log
    # logging.getLogger("uvicorn.error").setLevel(logging.ERROR)     # errors only
@@ -1,3 +1,5 @@
1
1
import contextvars

# Per-request/task trace id used to correlate log lines.
current_trace_id = contextvars.ContextVar("trace_id", default=None)

# Raw request headers captured per-request — presumably a list of
# (name, value) pairs as set by the middleware; verify against callers.
current_headers = contextvars.ContextVar("headers", default=None)
@@ -3,33 +3,53 @@ import re
3
3
  from typing import Dict, Any
4
4
  from fastapi import Request, Response
5
5
  from sycommon.logging.kafka_log import SYLogger
6
+ from sycommon.tools.merge_headers import merge_headers
6
7
  from sycommon.tools.snowflake import Snowflake
7
8
 
8
9
 
9
10
  def setup_trace_id_handler(app):
10
11
  @app.middleware("http")
11
12
  async def trace_id_and_log_middleware(request: Request, call_next):
12
- # 生成或获取 traceId
13
- trace_id = request.headers.get("x-traceId-header")
13
+ # ========== 1. 请求阶段:确保获取/生成 x-traceId-header ==========
14
+ # 优先从请求头读取(兼容任意大小写)
15
+ trace_id = request.headers.get(
16
+ "x-traceId-header") or request.headers.get("x-traceid-header")
17
+ # 无则生成雪花ID
14
18
  if not trace_id:
15
- trace_id = Snowflake.next_id()
19
+ trace_id = Snowflake.id
16
20
 
17
- # 设置 trace_id 上下文
21
+ # 设置 trace_id 到日志上下文
18
22
  token = SYLogger.set_trace_id(trace_id)
23
+ header_token = SYLogger.set_headers(request.headers.raw)
19
24
 
20
25
  # 获取请求参数
21
26
  query_params = dict(request.query_params)
22
27
  request_body: Dict[str, Any] = {}
23
28
  files_info: Dict[str, str] = {}
24
29
 
25
- # 检测请求内容类型
30
+ json_content_types = [
31
+ "application/json",
32
+ "text/plain;charset=utf-8",
33
+ "text/plain"
34
+ ]
26
35
  content_type = request.headers.get("content-type", "").lower()
36
+ is_json_content = any(ct in content_type for ct in json_content_types)
27
37
 
28
- if "application/json" in content_type and request.method in ["POST", "PUT", "PATCH"]:
38
+ if is_json_content and request.method in ["POST", "PUT", "PATCH"]:
29
39
  try:
30
- request_body = await request.json()
40
+ # 兼容纯文本格式的 JSON(先读文本再解析)
41
+ if "text/plain" in content_type:
42
+ raw_text = await request.text(encoding="utf-8")
43
+ request_body = json.loads(raw_text)
44
+ else:
45
+ # application/json 直接解析
46
+ request_body = await request.json()
31
47
  except Exception as e:
32
- request_body = {"error": f"Failed to parse JSON: {str(e)}"}
48
+ try:
49
+ request_body = await request.json()
50
+ except Exception as e:
51
+ # 精准捕获 JSON 解析错误(而非泛 Exception)
52
+ request_body = {"error": f"JSON parse failed: {str(e)}"}
33
53
 
34
54
  elif "multipart/form-data" in content_type and request.method in ["POST", "PUT"]:
35
55
  try:
@@ -62,8 +82,9 @@ def setup_trace_id_handler(app):
62
82
  request_body = {
63
83
  "error": f"Failed to process form data: {str(e)}"}
64
84
 
65
- # 构建请求日志信息
85
+ # 构建请求日志(包含 traceId)
66
86
  request_message = {
87
+ "traceId": trace_id, # 请求日志中加入 traceId
67
88
  "method": request.method,
68
89
  "url": str(request.url),
69
90
  "query_params": query_params,
@@ -77,68 +98,159 @@ def setup_trace_id_handler(app):
77
98
  # 处理请求
78
99
  response = await call_next(request)
79
100
 
80
- content_type = response.headers.get("Content-Type", "")
101
+ # 获取响应Content-Type(统一小写)
102
+ content_type = response.headers.get("content-type", "").lower()
81
103
 
82
- # 处理 SSE 响应
104
+ # ========== 2. SSE 响应:仅设置 x-traceId-header,不修改其他头 ==========
83
105
  if "text/event-stream" in content_type:
84
- # 流式响应不能有Content-Length,移除它
85
- if "Content-Length" in response.headers:
86
- del response.headers["Content-Length"]
87
- response.headers["x-traceId-header"] = trace_id
106
+ try:
107
+ # 强制写入 x-traceId-header 到响应头
108
+ response.headers["x-traceId-header"] = trace_id
109
+ # 确保前端能读取(仅补充暴露头,不覆盖原有值)
110
+ expose_headers = response.headers.get(
111
+ "access-control-expose-headers", "")
112
+ if expose_headers:
113
+ if "x-traceId-header" not in expose_headers.lower():
114
+ response.headers[
115
+ "access-control-expose-headers"] = f"{expose_headers}, x-traceId-header"
116
+ else:
117
+ response.headers["access-control-expose-headers"] = "x-traceId-header"
118
+ # SSE 必须移除 Content-Length(仅这一个额外操作)
119
+ headers_lower = {
120
+ k.lower(): k for k in response.headers.keys()}
121
+ if "content-length" in headers_lower:
122
+ del response.headers[headers_lower["content-length"]]
123
+ except AttributeError:
124
+ # 流式响应头只读:初始化时仅加入 traceId 和必要暴露头
125
+ new_headers = dict(response.headers) if hasattr(
126
+ response.headers, 'items') else {}
127
+ new_headers["x-traceId-header"] = trace_id # 强制加入
128
+ # 保留原有暴露头,补充 traceId
129
+ if "access-control-expose-headers" in new_headers:
130
+ if "x-traceId-header" not in new_headers["access-control-expose-headers"].lower():
131
+ new_headers["access-control-expose-headers"] += ", x-traceId-header"
132
+ else:
133
+ new_headers["access-control-expose-headers"] = "x-traceId-header"
134
+ # 移除 Content-Length
135
+ new_headers.pop("content-length", None)
136
+ response.init_headers(new_headers)
88
137
  return response
89
138
 
90
- # 处理普通响应
139
+ # ========== 3. 非 SSE 响应:强制写入 x-traceId-header,保留 CORS ==========
140
+ # 备份 CORS 头(防止丢失)
141
+ cors_headers = {}
142
+ cors_header_keys = [
143
+ "access-control-allow-origin",
144
+ "access-control-allow-methods",
145
+ "access-control-allow-headers",
146
+ "access-control-expose-headers",
147
+ "access-control-allow-credentials",
148
+ "access-control-max-age"
149
+ ]
150
+ for key in cors_header_keys:
151
+ for k in response.headers.keys():
152
+ if k.lower() == key:
153
+ cors_headers[key] = response.headers[k]
154
+ break
155
+
156
+ # 合并 headers(非 SSE 场景)
157
+ merged_headers = merge_headers(
158
+ source_headers=request.headers,
159
+ target_headers=response.headers,
160
+ keep_keys=None,
161
+ delete_keys={'content-length', 'accept', 'content-type'}
162
+ )
163
+
164
+ # 强制加入 x-traceId-header(优先级最高)
165
+ merged_headers["x-traceId-header"] = trace_id
166
+ # 恢复 CORS 头 + 补充 traceId 到暴露头
167
+ merged_headers.update(cors_headers)
168
+ expose_headers = merged_headers.get(
169
+ "access-control-expose-headers", "")
170
+ if expose_headers:
171
+ if "x-traceId-header" not in expose_headers.lower():
172
+ merged_headers["access-control-expose-headers"] = f"{expose_headers}, x-traceId-header"
173
+ else:
174
+ merged_headers["access-control-expose-headers"] = "x-traceId-header"
175
+
176
+ # 更新响应头
177
+ if hasattr(response.headers, 'clear'):
178
+ response.headers.clear()
179
+ for k, v in merged_headers.items():
180
+ response.headers[k] = v
181
+ elif hasattr(response, "init_headers"):
182
+ response.init_headers(merged_headers)
183
+ else:
184
+ for k, v in merged_headers.items():
185
+ try:
186
+ response.headers[k] = v
187
+ except (AttributeError, KeyError):
188
+ pass
189
+
190
+ # 处理普通响应体(JSON 加入 traceId)
91
191
  response_body = b""
92
192
  try:
93
- # 收集所有响应块
94
193
  async for chunk in response.body_iterator:
95
194
  response_body += chunk
96
195
 
196
+ # 获取 Content-Disposition(统一小写)
97
197
  content_disposition = response.headers.get(
98
- "Content-Disposition", "")
198
+ "content-disposition", "").lower()
99
199
 
100
- # 判断是否能添加 trace_id
200
+ # JSON 响应体加入 traceId
101
201
  if "application/json" in content_type and not content_disposition.startswith("attachment"):
102
202
  try:
103
203
  data = json.loads(response_body)
104
- data["traceId"] = trace_id
105
- new_body = json.dumps(
106
- data, ensure_ascii=False).encode()
204
+ new_body = response_body
205
+ if data:
206
+ data["traceId"] = trace_id # 响应体也加入
207
+ new_body = json.dumps(
208
+ data, ensure_ascii=False).encode()
107
209
 
108
- # 创建新响应,确保Content-Length正确
210
+ # 重建响应,确保 header 包含 x-traceId-header
109
211
  response = Response(
110
212
  content=new_body,
111
213
  status_code=response.status_code,
112
214
  headers=dict(response.headers),
113
215
  media_type=response.media_type
114
216
  )
115
- # 显式设置正确的Content-Length
116
- response.headers["Content-Length"] = str(len(new_body))
217
+ response.headers["content-length"] = str(len(new_body))
218
+ response.headers["x-traceId-header"] = trace_id # 再次兜底
219
+ # 恢复 CORS 头
220
+ for k, v in cors_headers.items():
221
+ response.headers[k] = v
117
222
  except json.JSONDecodeError:
118
- # 如果不是JSON,恢复原始响应体并更新长度
223
+ # JSON 响应:仅更新长度,强制加入 traceId
119
224
  response = Response(
120
225
  content=response_body,
121
226
  status_code=response.status_code,
122
227
  headers=dict(response.headers),
123
228
  media_type=response.media_type
124
229
  )
125
- response.headers["Content-Length"] = str(
230
+ response.headers["content-length"] = str(
126
231
  len(response_body))
232
+ response.headers["x-traceId-header"] = trace_id # 强制加入
233
+ for k, v in cors_headers.items():
234
+ response.headers[k] = v
127
235
  else:
128
- # 非JSON响应,恢复原始响应体
236
+ # 非 JSON 响应:强制加入 traceId
129
237
  response = Response(
130
238
  content=response_body,
131
239
  status_code=response.status_code,
132
240
  headers=dict(response.headers),
133
241
  media_type=response.media_type
134
242
  )
135
- response.headers["Content-Length"] = str(
243
+ response.headers["content-length"] = str(
136
244
  len(response_body))
245
+ response.headers["x-traceId-header"] = trace_id # 强制加入
246
+ for k, v in cors_headers.items():
247
+ response.headers[k] = v
137
248
  except StopAsyncIteration:
138
249
  pass
139
250
 
140
- # 构建响应日志信息
251
+ # 构建响应日志(包含 traceId)
141
252
  response_message = {
253
+ "traceId": trace_id, # 响应日志加入 traceId
142
254
  "status_code": response.status_code,
143
255
  "response_body": response_body.decode('utf-8', errors='ignore'),
144
256
  }
@@ -146,11 +258,21 @@ def setup_trace_id_handler(app):
146
258
  response_message, ensure_ascii=False)
147
259
  SYLogger.info(response_message_str)
148
260
 
149
- response.headers["x-traceId-header"] = trace_id
261
+ # ========== 最终兜底:确保响应头必有 x-traceId-header ==========
262
+ try:
263
+ response.headers["x-traceId-header"] = trace_id
264
+ except AttributeError:
265
+ new_headers = dict(response.headers) if hasattr(
266
+ response.headers, 'items') else {}
267
+ new_headers["x-traceId-header"] = trace_id
268
+ if hasattr(response, "init_headers"):
269
+ response.init_headers(new_headers)
150
270
 
151
271
  return response
152
272
  except Exception as e:
273
+ # 异常日志也加入 traceId
153
274
  error_message = {
275
+ "traceId": trace_id,
154
276
  "error": str(e),
155
277
  "query_params": query_params,
156
278
  "request_body": request_body,
@@ -160,7 +282,8 @@ def setup_trace_id_handler(app):
160
282
  SYLogger.error(error_message_str)
161
283
  raise
162
284
  finally:
163
- # 清理上下文变量,防止泄漏
285
+ # 清理上下文变量
164
286
  SYLogger.reset_trace_id(token)
287
+ SYLogger.reset_headers(header_token)
165
288
 
166
289
  return app