sycommon-python-lib 0.1.55b1__py3-none-any.whl → 0.1.56__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- sycommon/config/Config.py +29 -4
- sycommon/config/LangfuseConfig.py +15 -0
- sycommon/config/RerankerConfig.py +1 -0
- sycommon/config/SentryConfig.py +13 -0
- sycommon/llm/__init__.py +0 -0
- sycommon/llm/embedding.py +204 -0
- sycommon/llm/get_llm.py +37 -0
- sycommon/llm/llm_logger.py +126 -0
- sycommon/llm/llm_tokens.py +119 -0
- sycommon/llm/struct_token.py +192 -0
- sycommon/llm/sy_langfuse.py +103 -0
- sycommon/llm/usage_token.py +117 -0
- sycommon/logging/kafka_log.py +200 -434
- sycommon/logging/logger_levels.py +23 -0
- sycommon/middleware/context.py +2 -0
- sycommon/middleware/exception.py +10 -16
- sycommon/middleware/timeout.py +2 -1
- sycommon/middleware/traceid.py +174 -48
- sycommon/notice/__init__.py +0 -0
- sycommon/notice/uvicorn_monitor.py +200 -0
- sycommon/rabbitmq/rabbitmq_client.py +232 -242
- sycommon/rabbitmq/rabbitmq_pool.py +278 -218
- sycommon/rabbitmq/rabbitmq_service.py +25 -843
- sycommon/rabbitmq/rabbitmq_service_client_manager.py +206 -0
- sycommon/rabbitmq/rabbitmq_service_connection_monitor.py +73 -0
- sycommon/rabbitmq/rabbitmq_service_consumer_manager.py +285 -0
- sycommon/rabbitmq/rabbitmq_service_core.py +117 -0
- sycommon/rabbitmq/rabbitmq_service_producer_manager.py +238 -0
- sycommon/sentry/__init__.py +0 -0
- sycommon/sentry/sy_sentry.py +35 -0
- sycommon/services.py +124 -96
- sycommon/synacos/feign.py +8 -3
- sycommon/synacos/feign_client.py +22 -8
- sycommon/synacos/nacos_client_base.py +119 -0
- sycommon/synacos/nacos_config_manager.py +107 -0
- sycommon/synacos/nacos_heartbeat_manager.py +144 -0
- sycommon/synacos/nacos_service.py +64 -771
- sycommon/synacos/nacos_service_discovery.py +157 -0
- sycommon/synacos/nacos_service_registration.py +270 -0
- sycommon/tools/env.py +62 -0
- sycommon/tools/merge_headers.py +117 -0
- sycommon/tools/snowflake.py +101 -153
- {sycommon_python_lib-0.1.55b1.dist-info → sycommon_python_lib-0.1.56.dist-info}/METADATA +11 -5
- sycommon_python_lib-0.1.56.dist-info/RECORD +89 -0
- sycommon_python_lib-0.1.55b1.dist-info/RECORD +0 -62
- {sycommon_python_lib-0.1.55b1.dist-info → sycommon_python_lib-0.1.56.dist-info}/WHEEL +0 -0
- {sycommon_python_lib-0.1.55b1.dist-info → sycommon_python_lib-0.1.56.dist-info}/entry_points.txt +0 -0
- {sycommon_python_lib-0.1.55b1.dist-info → sycommon_python_lib-0.1.56.dist-info}/top_level.txt +0 -0
sycommon/rabbitmq/rabbitmq_pool.py

```diff
@@ -5,16 +5,25 @@ from aio_pika import connect_robust, RobustChannel, Message
 from aio_pika.abc import (
     AbstractRobustConnection, AbstractQueue, AbstractExchange, AbstractMessage
 )
-from aio_pika.exceptions import ChannelClosed
-import aiormq.exceptions
-
 from sycommon.logging.kafka_log import SYLogger
 
 logger = SYLogger
 
 
+class AsyncProperty:
+    """实现 await obj.attr 的支持"""
+
+    def __init__(self, method):
+        self.method = method
+
+    def __get__(self, obj, objtype=None):
+        if obj is None:
+            return self
+        return self.method(obj)
+
+
 class RabbitMQConnectionPool:
-    """单连接单通道RabbitMQ
+    """单连接单通道RabbitMQ客户端 (严格执行“先清理后连接”策略)"""
 
     def __init__(
         self,
@@ -23,13 +32,12 @@ class RabbitMQConnectionPool:
         username: str,
         password: str,
         virtualhost: str = "/",
-        heartbeat: int =
+        heartbeat: int = 15,
         app_name: str = "",
-        connection_timeout: int =
+        connection_timeout: int = 15,
         reconnect_interval: int = 5,
         prefetch_count: int = 2,
     ):
-        # 基础配置校验与初始化
         self.hosts = [host.strip() for host in hosts if host.strip()]
         if not self.hosts:
             raise ValueError("至少需要提供一个RabbitMQ主机地址")
@@ -44,295 +52,347 @@
         self.reconnect_interval = reconnect_interval
         self.prefetch_count = prefetch_count
 
-        # 初始化时随机选择一个主机地址(固定使用,依赖原生重连)
         self._current_host: str = random.choice(self.hosts)
-        logger.info(
-            f"随机选择RabbitMQ主机: {self._current_host}(依赖connect_robust原生自动重连/恢复)")
+        logger.info(f"[INIT] 随机选择RabbitMQ主机: {self._current_host}")
 
-        #
-        self._connection: Optional[AbstractRobustConnection] = None
-        self._channel: Optional[RobustChannel] = None
-
-        self._consumer_channels: Dict[str,
-                                      Tuple[RobustChannel, Callable, bool, dict]] = {}
+        # 核心资源
+        self._connection: Optional[AbstractRobustConnection] = None
+        self._channel: Optional[RobustChannel] = None
+        self._consumer_channels: Dict[str, RobustChannel] = {}
 
-        #
+        # 状态控制
         self._lock = asyncio.Lock()
         self._initialized = False
         self._is_shutdown = False
 
-
-        """原子化检查连接有效性(所有状态判断均加锁,确保原子性)"""
-        async with self._lock:
-            # 优先级:先判断是否关闭,再判断是否初始化,最后判断连接状态
-            return not self._is_shutdown and self._initialized and self._connection is not None and not self._connection.is_closed
-
-    @property
+    @AsyncProperty
     async def is_alive(self) -> bool:
-        """
+        """对外暴露的连接存活状态"""
         async with self._lock:
             if self._is_shutdown:
                 return False
-
-
+            if not self._initialized:
+                return False
+            if self._connection is None or self._connection.is_closed:
+                return False
+            if self._channel is None or self._channel.is_closed:
+                return False
+            return True
+
+    async def _cleanup_resources(self):
+        """
+        彻底清理旧资源
+        必须在持有 self._lock 的情况下调用
+        """
+        logger.info("🧹 [CLEANUP] 开始清理旧资源...")
+
+        # 1. 清理所有消费者通道
+        if self._consumer_channels:
+            channels_to_close = list(self._consumer_channels.values())
+            self._consumer_channels.clear()
 
-
-
-
-
-
+            for ch in channels_to_close:
+                try:
+                    if not ch.is_closed:
+                        await ch.close()
+                except Exception as e:
+                    logger.warning(f"⚠️ [CLEANUP_CH] 关闭消费者通道失败: {e}")
 
-
-
+        # 2. 关闭主通道
+        if self._channel:
+            try:
+                if not self._channel.is_closed:
+                    await self._channel.close()
+            except Exception as e:
+                logger.warning(f"⚠️ [CLEANUP_MAIN_CH] 关闭主通道失败: {e}")
+            finally:
+                self._channel = None
 
+        # 3. 关闭连接
+        if self._connection:
+            try:
+                if not self._connection.is_closed:
+                    # close() 可能是同步的,也可能是异步的,aio_pika 中通常是异步的
+                    await self._connection.close()
+            except Exception as e:
+                logger.warning(f"⚠️ [CLEANUP_CONN] 关闭连接失败: {e}")
+            finally:
+                self._connection = None
+
+        logger.info("✅ [CLEANUP] 资源清理完成")
+
+    async def _create_connection_impl(self, host: str) -> AbstractRobustConnection:
+        conn_url = (
+            f"amqp://{self.username}:{self.password}@{host}:{self.port}/"
+            f"{self.virtualhost}?name={self.app_name}&heartbeat={self.heartbeat}"
+            f"&reconnect_interval={self.reconnect_interval}&fail_fast=1"
+        )
+        logger.info(f"🔌 [CONNECT] 尝试连接节点: {host}")
         try:
-            conn = await
-                conn_url,
-                timeout=self.connection_timeout
+            conn = await asyncio.wait_for(
+                connect_robust(conn_url),
+                timeout=self.connection_timeout + 5
             )
-            logger.info(f"
+            logger.info(f"✅ [CONNECT_OK] 节点连接成功: {host}")
             return conn
         except Exception as e:
-            logger.error(f"
-            raise ConnectionError(
-
-
-
-
+            logger.error(f"❌ [CONNECT_FAIL] 节点 {host} 连接失败: {str(e)}")
+            raise ConnectionError(f"无法连接RabbitMQ {host}") from e
+
+    async def _ensure_main_channel(self) -> RobustChannel:
+        """
+        确保主通道有效
+        逻辑:
+        1. 检查连接状态
+        2. 如果断开 -> 清理 -> 轮询重试
+        3. 如果连接在但通道断开 -> 仅重建通道
+        """
         async with self._lock:
-            # 先判断是否关闭(优先级最高)
             if self._is_shutdown:
-                raise RuntimeError("
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                raise RuntimeError("客户端已关闭")
+
+            # --- 阶段A:连接恢复逻辑 (如果连接断了) ---
+            if self._connection is None or self._connection.is_closed:
+
+                # 1. 【强制】先彻底清理所有旧资源
+                await self._cleanup_resources()
+
+                retry_hosts = self.hosts.copy()
+                random.shuffle(retry_hosts)
+                last_error = None
+                max_attempts = min(len(retry_hosts), 3)
+
+                # 2. 轮询尝试新连接
+                for _ in range(max_attempts):
+                    if not retry_hosts:
+                        break
+
+                    host = retry_hosts.pop()
+                    self._current_host = host
+                    temp_conn = None
+
+                    try:
+                        temp_conn = await self._create_connection_impl(host)
+
+                        # 3. 只有在连接成功后,才更新 self._connection
+                        self._connection = temp_conn
+                        temp_conn = None # 转移所有权
+                        self._initialized = True
+                        last_error = None
+                        logger.info(f"🔗 [RECONNECT_OK] 切换到节点: {host}")
+                        break
+
+                    except Exception as e:
+                        logger.warning(f"⚠️ [RECONNECT_RETRY] 节点 {host} 不可用")
+                        if temp_conn is not None:
+                            # 尝试连接失败了,必须把这个“半成品”连接关掉
+                            try:
+                                await temp_conn.close()
+                            except Exception:
+                                pass
+                        last_error = e
+                        await asyncio.sleep(self.reconnect_interval)
+
+                # 4. 如果所有尝试都失败
+                if last_error:
+                    # 确保状态是干净的
+                    self._connection = None
+                    self._initialized = False
+                    logger.error("💥 [RECONNECT_FATAL] 所有节点重试失败")
+                    raise ConnectionError("所有 RabbitMQ 节点连接失败") from last_error
+
+            # --- 阶段B:通道恢复逻辑 (如果连接在但通道断了) ---
+            # 注意:这里不需要清理连接,只重置通道
+            if self._channel is None or self._channel.is_closed:
+                try:
+                    self._channel = await self._connection.channel()
+                    await self._channel.set_qos(prefetch_count=self.prefetch_count)
+                    logger.info(f"✅ [CHANNEL_OK] 主通道已恢复")
+                except Exception as e:
+                    # 如果连通道都创建不了,说明这个连接也是坏的,回滚到阶段A
+                    logger.error(f"❌ [CHANNEL_FAIL] 通道创建失败,标记连接无效: {e}")
+                    # 强制清理连接,触发下一次进入阶段A
+                    await self._cleanup_resources()
+                    raise
 
         return self._channel
 
     async def init_pools(self):
-        """
+        """初始化入口"""
         async with self._lock:
-            # 原子化判断:是否已关闭/已初始化
             if self._is_shutdown:
-                raise RuntimeError("
+                raise RuntimeError("客户端已关闭")
             if self._initialized:
-                logger.warning("客户端已初始化,无需重复调用")
                 return
 
+        # 在 try 之前声明变量,确保 except 块能访问
+        conn_created_in_this_try = None
+
         try:
-            #
-
+            # 锁外创建连接,减少锁持有时间
+            init_host = random.choice(self.hosts)
+            conn = await self._create_connection_impl(init_host)
 
-            #
-
+            # 记录本次创建的连接
+            conn_created_in_this_try = conn
 
-            # 3. 标记为已初始化(加锁保护)
             async with self._lock:
+                if self._is_shutdown:
+                    raise RuntimeError("客户端已关闭")
+
+                # 提交新资源
+                self._connection = conn
+                self._channel = await self._connection.channel()
+                await self._channel.set_qos(prefetch_count=self.prefetch_count)
                 self._initialized = True
 
-
+            # 所有权转移成功,清空临时引用,防止 finally 重复关闭
+            conn_created_in_this_try = None
+
+            logger.info(f"🚀 [INIT_OK] 连接池初始化完成: {init_host}")
+
         except Exception as e:
-            logger.error(f"
-
+            logger.error(f"💥 [INIT_FAIL] 初始化异常: {str(e)}")
+
+            # 这里现在可以合法访问 conn_created_in_this_try
+            if conn_created_in_this_try is not None:
+                try:
+                    await conn_created_in_this_try.close()
+                except Exception:
+                    pass
+
+            if not self._is_shutdown:
+                await self.close()
             raise
 
-    async def
-        """
+    async def force_reconnect(self):
+        """
+        强制重连
+        严格执行:清理所有资源 -> 尝试建立新资源
+        """
         async with self._lock:
-            # 原子化状态校验
             if self._is_shutdown:
-
-            if not self._initialized:
-                raise RuntimeError("客户端未初始化,请先调用init_pools()")
+                return
 
-
-        channel = await self._check_and_recover_channel()
-        return channel, self._connection # 单通道无需管理"使用中/空闲"状态
+            logger.warning("🔄 [FORCE_RECONNECT] 开始强制重连...")
 
-
-
-        channel, _ = await self.acquire_channel()
-        return await channel.declare_queue(queue_name, **kwargs)
+            # 1. 【关键】标记未初始化,迫使 _ensure_main_channel 走清理流程
+            self._initialized = False
 
-
-
-
-
+            # 2. 【关键】立即清理旧资源 (在锁内)
+            await self._cleanup_resources()
+
+            # 此时 self._connection 和 self._channel 均为 None
+
+        # 3. 锁外触发恢复 (避免阻塞锁太久)
+        try:
+            await self.acquire_channel()
+            logger.info("✅ [FORCE_RECONNECT_OK] 强制重连成功")
+        except Exception as e:
+            logger.error(f"❌ [FORCE_RECONNECT_FAIL] 强制重连失败: {e}")
+            raise
+
+    async def acquire_channel(self) -> Tuple[RobustChannel, AbstractRobustConnection]:
+        """获取主通道"""
+        if not self._initialized and not self._is_shutdown:
+            await self.init_pools()
+        return await self._ensure_main_channel(), self._connection
 
     async def publish_message(self, routing_key: str, message_body: bytes, exchange_name: str = "", **kwargs):
-        """发布消息(依赖原生自动重连/恢复)"""
         channel, _ = await self.acquire_channel()
         try:
             exchange = channel.default_exchange if not exchange_name else await channel.get_exchange(exchange_name)
             message = Message(body=message_body, **kwargs)
             await exchange.publish(message, routing_key=routing_key)
-            logger.debug(
-                f"消息发布成功 - 交换机: {exchange.name}, 路由键: {routing_key}"
-            )
         except Exception as e:
-            logger.error(f"
-            raise
+            logger.error(f"❌ [PUBLISH_FAIL] 发布失败: {str(e)}")
+            raise
 
     async def consume_queue(self, queue_name: str, callback: Callable[[AbstractMessage], asyncio.Future], auto_ack: bool = False, **kwargs):
-
+        if not self._initialized:
+            await self.init_pools()
+
+        # 检查是否已存在
         async with self._lock:
-            # 原子化状态校验
             if self._is_shutdown:
-                raise RuntimeError("
-            if not self._initialized:
-                raise RuntimeError("客户端未初始化,请先调用init_pools()")
+                raise RuntimeError("客户端已关闭")
             if queue_name in self._consumer_channels:
-                logger.warning(f"队列 {queue_name}
+                logger.warning(f"⚠️ [CONSUMER_EXISTS] 队列 {queue_name} 已在消费中")
                 return
+            if not self._connection or self._connection.is_closed:
+                raise RuntimeError("连接不可用,无法启动消费")
 
-        #
+        # 声明队列 (使用主通道)
         await self.declare_queue(queue_name, **kwargs)
 
-
-
-
-                raise RuntimeError("客户端已关闭,无法创建消费者通道")
-            if not self._connection or self._connection.is_closed:
-                raise RuntimeError("无有效连接,无法创建消费者通道")
-            channel = await self._connection.channel()
-            await channel.set_qos(prefetch_count=self.prefetch_count)
+        try:
+            # 获取最新连接
+            _, conn = await self.acquire_channel()
 
-            #
-
-
+            # 创建消费者通道
+            consumer_channel = await conn.channel()
+            await consumer_channel.set_qos(prefetch_count=self.prefetch_count)
 
-
-
-
-
-
-
-
-                    if not auto_ack:
-                        await message.nack(requeue=True)
-                    return
-                channel_valid = not channel.is_closed
-                conn_valid = self._connection and not self._connection.is_closed
-
-                if not channel_valid or not conn_valid:
-                    logger.warning(f"消费者通道 {queue_name} 失效(等待原生自动恢复)")
-                    if not auto_ack:
-                        await message.nack(requeue=True)
+            async with self._lock:
+                # 再次检查,防止并发创建
+                if self._is_shutdown:
+                    await consumer_channel.close()
+                    return
+                if queue_name in self._consumer_channels:
+                    await consumer_channel.close() # 其他协程已经创建了
                     return
 
-
-                await callback(message)
-                if not auto_ack:
-                    await message.ack()
-            except ChannelClosed as e:
-                logger.error(f"消费者通道 {queue_name} 关闭: {str(e)}", exc_info=True)
-                if not auto_ack:
-                    await message.nack(requeue=True)
-            except aiormq.exceptions.ChannelInvalidStateError as e:
-                logger.error(
-                    f"消费者通道 {queue_name} 状态异常: {str(e)}", exc_info=True)
-                if not auto_ack:
-                    await message.nack(requeue=True)
-            except Exception as e:
-                logger.error(
-                    f"消费消息失败(队列: {queue_name}): {str(e)}", exc_info=True)
-                if not auto_ack:
-                    await message.nack(requeue=True)
+                self._consumer_channels[queue_name] = consumer_channel
 
-
+            async def consume_callback_wrapper(message: AbstractMessage):
+                try:
+                    await callback(message)
+                    if not auto_ack:
+                        await message.ack()
+                except Exception as e:
+                    logger.error(f"❌ [CALLBACK_ERR] {queue_name}: {e}")
+                    if not auto_ack:
+                        await message.nack(requeue=True)
 
-
-
-                queue_name,
-                consumer_callback=consume_callback_wrapper,
-                auto_ack=auto_ack,
-                **kwargs
+            await consumer_channel.basic_consume(
+                queue_name, consumer_callback=consume_callback_wrapper, auto_ack=auto_ack
             )
+            logger.info(f"🎧 [CONSUME_START] {queue_name}")
+
         except Exception as e:
-            logger.error(f"
-            #
-
-
-
-
-
-            if
-
-
-
+            logger.error(f"💥 [CONSUME_ERR] {queue_name}: {e}")
+            # 失败时清理字典
+            async with self._lock:
+                if queue_name in self._consumer_channels:
+                    # 注意:这里清理的是字典里的引用,通道本身应该在 try 块里被关闭了吗?
+                    # 如果 consumer_channel 创建成功但 basic_consume 失败,需要手动关闭
+                    ch = self._consumer_channels.pop(queue_name, None)
+                    if ch:
+                        try:
+                            await ch.close()
+                        except:
+                            pass
             raise
 
     async def close(self):
-        """
+        """资源销毁"""
         async with self._lock:
             if self._is_shutdown:
-                logger.warning("客户端已关闭,无需重复操作")
                 return
-            # 先标记为关闭,阻止后续所有操作(原子化修改)
             self._is_shutdown = True
             self._initialized = False
 
-            logger.info("
+            logger.info("🛑 [CLOSE] 开始关闭连接池...")
 
-        # 1.
-
-        async with self._lock:
-            consumer_channels = list(self._consumer_channels.values())
-            self._consumer_channels.clear()
-        for channel, _, _, _ in consumer_channels:
-            try:
-                if not channel.is_closed:
-                    await channel.close()
-            except Exception as e:
-                logger.warning(f"关闭消费者通道失败: {str(e)}")
+            # 1. 清理所有资源
+            await self._cleanup_resources()
 
-
-        if self._channel:
-            try:
-                async with self._lock:
-                    if not self._channel.is_closed:
-                        await self._channel.close()
-            except Exception as e:
-                logger.warning(f"关闭主通道失败: {str(e)}")
-            self._channel = None
+            logger.info("🏁 [CLOSE] 连接池已关闭")
 
-
-
-
-            async with self._lock:
-                if not self._connection.is_closed:
-                    await self._connection.close()
-                    logger.info(
-                        f"已关闭连接: {self._current_host}:{self.port}(终止原生自动重连)")
-            except Exception as e:
-                logger.warning(f"关闭连接失败: {str(e)}")
-            self._connection = None
+    async def declare_queue(self, queue_name: str, **kwargs) -> AbstractQueue:
+        channel, _ = await self.acquire_channel()
+        return await channel.declare_queue(queue_name, **kwargs)
 
-
+    async def declare_exchange(self, exchange_name: str, exchange_type: str = "direct", **kwargs) -> AbstractExchange:
+        channel, _ = await self.acquire_channel()
+        return await channel.declare_exchange(exchange_name, exchange_type, **kwargs)
```
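
For orientation, a minimal usage sketch of the reworked pool follows. It is an untested sketch based only on the API visible in the diff above: the import path is assumed from the changed file `sycommon/rabbitmq/rabbitmq_pool.py`, the `port` keyword is inferred from the `self.port` reference in the connection URL, and the hosts and credentials are placeholders.

```python
import asyncio

from aio_pika.abc import AbstractMessage

# Assumed import path, taken from the changed file sycommon/rabbitmq/rabbitmq_pool.py.
from sycommon.rabbitmq.rabbitmq_pool import RabbitMQConnectionPool


async def handle(message: AbstractMessage) -> None:
    # Business callback; with auto_ack=False the pool's wrapper acks on success
    # and nacks with requeue on error.
    print(message.body)


async def main() -> None:
    # Placeholder hosts/credentials; `port` is an assumed keyword inferred from self.port.
    pool = RabbitMQConnectionPool(
        hosts=["10.0.0.1", "10.0.0.2"],
        port=5672,
        username="guest",
        password="guest",
        app_name="demo-service",
    )
    await pool.init_pools()

    if await pool.is_alive:  # awaitable attribute via the new AsyncProperty descriptor
        await pool.consume_queue("demo.queue", handle, auto_ack=False)
        await pool.publish_message("demo.queue", b"hello")

    await asyncio.sleep(5)  # give the consumer a moment to process the message
    await pool.close()


if __name__ == "__main__":
    asyncio.run(main())
```

Note that `await pool.is_alive` works without a call because `AsyncProperty.__get__` returns the un-awaited coroutine of the decorated method, and that `heartbeat` and `connection_timeout` now default to 15 in this release.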