sycommon-python-lib 0.1.43__py3-none-any.whl → 0.1.44__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sycommon/rabbitmq/rabbitmq_pool.py +332 -294
- sycommon/rabbitmq/rabbitmq_service.py +0 -4
- sycommon/services.py +2 -1
- sycommon/synacos/feign.py +9 -8
- sycommon/synacos/nacos_service.py +65 -46
- {sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/METADATA +7 -7
- {sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/RECORD +10 -10
- {sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/WHEEL +0 -0
- {sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/entry_points.txt +0 -0
- {sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/top_level.txt +0 -0
sycommon/rabbitmq/rabbitmq_pool.py
CHANGED

@@ -1,7 +1,9 @@
 import asyncio
-from typing import
-from aio_pika import connect_robust, Channel
-from aio_pika.abc import
+from typing import Optional, List, Set, Iterator, Tuple
+from aio_pika import connect_robust, Channel, Message
+from aio_pika.abc import (
+    AbstractRobustConnection, AbstractQueue, AbstractExchange, AbstractMessage
+)

 from sycommon.logging.kafka_log import SYLogger

@@ -9,7 +11,7 @@ logger = SYLogger


 class RabbitMQConnectionPool:
-    """
+    """单连接RabbitMQ通道池(严格单连接)"""

     def __init__(
         self,
@@ -18,349 +20,385 @@ class RabbitMQConnectionPool:
         username: str,
         password: str,
         virtualhost: str = "/",
-        channel_pool_size: int = 5,
+        channel_pool_size: int = 1,
         heartbeat: int = 30,
         app_name: str = "",
+        connection_timeout: int = 30,
+        reconnect_interval: int = 30,
+        prefetch_count: int = 2,
     ):
         self.hosts = [host.strip() for host in hosts if host.strip()]
         if not self.hosts:
             raise ValueError("至少需要提供一个RabbitMQ主机地址")
+
+        # 连接配置(所有通道共享此连接的配置)
         self.port = port
         self.username = username
         self.password = password
         self.virtualhost = virtualhost
         self.app_name = app_name or "rabbitmq-client"
         self.heartbeat = heartbeat
-        self.reconnect_interval = reconnect_interval
         self.connection_timeout = connection_timeout
-        self.connection_pool_size = connection_pool_size
+        self.reconnect_interval = reconnect_interval
+        self.prefetch_count = prefetch_count
         self.channel_pool_size = channel_pool_size

-        #
-        self.
-        self.
-                AbstractRobustConnection]] = []  # 通道绑定所属连接
-        self._used_channels: Set[Tuple[Channel,
-                                       AbstractRobustConnection]] = set()
+        # 节点轮询:仅用于连接失效时切换节点(仍保持单连接)
+        self._host_iterator: Iterator[str] = self._create_host_iterator()
+        self._current_host: Optional[str] = None  # 当前连接的节点

-        #
-        self.
-        self.
+        # 核心资源(严格单连接 + 通道池)
+        self._connection: Optional[AbstractRobustConnection] = None  # 唯一连接
+        self._free_channels: List[Channel] = []  # 通道池(仅存储当前连接的通道)
+        self._used_channels: Set[Channel] = set()

-        #
+        # 状态控制(确保线程安全)
+        self._lock = asyncio.Lock()
         self._initialized = False
-        self._reconnect_task: Optional[asyncio.Task] = None
         self._is_shutdown = False
+        self._reconnecting = False  # 避免重连并发冲突
+
+    def _create_host_iterator(self) -> Iterator[str]:
+        """创建节点轮询迭代器(无限循环,仅用于切换节点)"""
+        while True:
+            for host in self.hosts:
+                yield host

     @property
     def is_alive(self) -> bool:
+        """检查唯一连接是否存活(使用is_closed判断,兼容所有版本)"""
+        if not self._initialized or not self._connection:
             return False
-        #
-        asyncio.create_task(self.
+        # 异步清理失效通道(不影响主流程)
+        asyncio.create_task(self._clean_invalid_channels())
+        return not self._connection.is_closed
+
+    async def _safe_close_resources(self):
+        """安全关闭资源:先关通道,再关连接(保证单连接特性)"""
+        async with self._lock:
+            # 1. 关闭所有通道(无论空闲还是使用中)
+            all_channels = self._free_channels + list(self._used_channels)
+            for channel in all_channels:
+                try:
+                    if not channel.is_closed:
+                        await channel.close()
+                except Exception as e:
+                    logger.warning(f"关闭通道失败: {str(e)}")
+            self._free_channels.clear()
+            self._used_channels.clear()

+            # 2. 关闭唯一连接
+            if self._connection:
+                try:
+                    if not self._connection.is_closed:
+                        await self._connection.close()
+                        logger.info(f"已关闭唯一连接: {self._current_host}:{self.port}")
+                except Exception as e:
+                    logger.warning(f"关闭连接失败: {str(e)}")
+                self._connection = None  # 置空,确保单连接

-    async def
-        """
+    async def _create_single_connection(self) -> AbstractRobustConnection:
+        """创建唯一连接(失败时轮询节点,切换前关闭旧连接)"""
+        max_attempts = len(self.hosts)  # 每个节点尝试1次
+        attempts = 0
+        last_error: Optional[Exception] = None

-            await self._create_initial_connections()
-            # 启动连接监控任务(后台检查并重建失效连接)
-            self._reconnect_task = asyncio.create_task(
-                self._monitor_connections())
-            self._initialized = True
-            logger.info(
-                f"RabbitMQ连接池初始化成功 - 连接数: {len(self._connections)}, "
-                f"空闲通道数: {len(self._free_channels)}, 集群节点: {self.hosts}"
-            )
-        except Exception as e:
-            logger.error(f"连接池初始化失败: {str(e)}", exc_info=True)
-            await self.close()
-            raise
+        while attempts < max_attempts and not self._is_shutdown:
+            next_host = next(self._host_iterator)

-            try:
-                conn = await self._create_single_connection()
-                self._connections.append(
-                    (conn, asyncio.get_event_loop().time()))
-                # 为每个连接创建初始通道
-                chan_count_per_conn = self.channel_pool_size // self.connection_pool_size
-                for _ in range(chan_count_per_conn):
-                    chan = await conn.channel()
-                    self._free_channels.append((chan, conn))
-            except Exception as e:
-                logger.error(f"创建初始连接/通道失败(第{i+1}个): {str(e)}", exc_info=True)
-                # 允许部分连接失败,后续监控任务会重试
-                continue
+            # 切换节点前:强制关闭旧连接(保证单连接)
+            if self._connection:
+                await self._safe_close_resources()

-        retry_count = 0
-        max_retries = 3  # 每个节点最多重试3次
-        while retry_count < max_retries and not self._is_shutdown:
-            if not hosts:
-                hosts = self.hosts.copy()
-                retry_count += 1
-                if retry_count >= max_retries:
-                    logger.error(
-                        f"所有RabbitMQ节点({self.hosts})均连接失败,已重试{max_retries}次,将在15秒后再次尝试"
-                    )
-                    # 固定15秒间隔后退出,由监控任务触发下一次重试
-                    await asyncio.sleep(self.reconnect_interval)
-                    break
-            host = hosts.pop(0)
-            conn_url = (
-                f"amqp://{self.username}:{self.password}@{host}:{self.port}/{self.virtualhost}"
-                f"?heartbeat={self.heartbeat}&timeout={self.connection_timeout}"
-            )
+            self._current_host = next_host
+            conn_url = f"amqp://{self.username}:{self.password}@{self._current_host}:{self.port}/{self.virtualhost}"

             try:
+                logger.info(f"尝试创建唯一连接: {self._current_host}:{self.port}")
                 conn = await connect_robust(
                     conn_url,
-                        "connection_name": f"{self.app_name}
+                    properties={
+                        "connection_name": f"{self.app_name}_single_conn",
+                        "product": self.app_name
                     },
+                    heartbeat=self.heartbeat,
+                    timeout=self.connection_timeout,
+                    reconnect_interval=self.reconnect_interval,
+                    max_reconnect_attempts=None,  # 单节点内部自动重连
                 )
-                # 连接关闭回调(固定间隔重连)
-                def on_connection_closed(conn_instance: AbstractConnection, exc: Optional[BaseException]):
-                    logger.warning(
-                        f"RabbitMQ连接关闭: {conn_instance!r},原因: {exc}", exc_info=exc)
-                    asyncio.create_task(
-                        self._remove_invalid_connection(cast(AbstractRobustConnection, conn_instance)))
-                setattr(conn, '_pool_close_callback', on_connection_closed)
-                conn.close_callbacks.add(on_connection_closed)
-                logger.info(f"成功连接到RabbitMQ节点: {host}:{self.port}")
+                logger.info(f"唯一连接创建成功: {self._current_host}:{self.port}")
                 return conn
             except Exception as e:
+                attempts += 1
+                last_error = e
+                logger.error(
+                    f"连接节点 {self._current_host}:{self.port} 失败({attempts}/{max_attempts}): {str(e)}",
+                    exc_info=True
                 )
-                #
-                await asyncio.sleep(self.reconnect_interval)
+                await asyncio.sleep(30)  # 避免频繁重试

-        raise
-            f"
-        )
+        raise ConnectionError(
+            f"所有节点创建唯一连接失败(节点列表: {self.hosts})"
+        ) from last_error

-    async def
-        """
-        except Exception as e:
-            logger.warning(f"移除连接回调失败: {str(e)}")
-        # 1. 移除失效连接
-        async with self._conn_lock:
-            self._connections = [
-                (conn, ts) for conn, ts in self._connections if conn != invalid_conn
-            ]
-        # 2. 移除该连接关联的所有通道
-        async with self._chan_lock:
-            self._free_channels = [
-                (chan, conn) for chan, conn in self._free_channels if conn != invalid_conn
-            ]
-            self._used_channels = {
-                (chan, conn) for chan, conn in self._used_channels if conn != invalid_conn
-            }
-        # 3. 触发连接重建
-        asyncio.create_task(self._recreate_connection())
-
-    async def _recreate_connection(self):
-        """重建连接:固定间隔重试"""
-        try:
-            # 重建前检查是否已达到连接池上限
-            async with self._conn_lock:
-                if len(self._connections) >= self.connection_pool_size:
-                    logger.debug("连接池已达最大限制,跳过重建连接")
-                    return
+    async def _init_channel_pool(self):
+        """初始化通道池(绑定到唯一连接,仅创建指定数量的通道)"""
+        if not self._connection or self._connection.is_closed:
+            raise RuntimeError("无有效连接,无法初始化通道池")
+
+        async with self._lock:
+            self._free_channels.clear()
+            self._used_channels.clear()

-            self._connections.append(
-                (conn, asyncio.get_event_loop().time()))
-            # 补充通道
-            chan_count_per_conn = self.channel_pool_size // self.connection_pool_size
-            for _ in range(chan_count_per_conn):
+            # 创建指定数量的通道(池大小由channel_pool_size控制)
+            for i in range(self.channel_pool_size):
                 try:
+                    channel = await self._connection.channel()
+                    await channel.set_qos(prefetch_count=self.prefetch_count)
+                    self._free_channels.append(channel)
                 except Exception as e:
-                    logger.
+                    logger.error(f"创建通道失败(第{i+1}个): {str(e)}", exc_info=True)
+                    # 通道创建失败不中断,继续创建剩余通道
+                    continue
+
+        logger.info(
+            f"通道池初始化完成 - 连接: {self._current_host}:{self.port}, "
+            f"可用通道数: {len(self._free_channels)}/{self.channel_pool_size}"
+        )
+
+    async def _reconnect_if_needed(self) -> bool:
+        """连接失效时重连(保证单连接)"""
+        if self._is_shutdown or self._reconnecting:
+            return False
+
+        self._reconnecting = True
+        try:
+            logger.warning("连接失效,开始重连...")
+            # 重新创建唯一连接
+            self._connection = await self._create_single_connection()
+            # 重新初始化通道池
+            await self._init_channel_pool()
+            logger.info("重连成功,通道池已恢复")
+            return True
         except Exception as e:
-            logger.error(f"
-            #
-
-    async def _monitor_connections(self):
-        """后台监控:固定15秒检查一次连接状态"""
-        while self._initialized and not self._is_shutdown:
-            try:
-                await asyncio.sleep(self.reconnect_interval)  # 固定15秒间隔检查
-                current_time = asyncio.get_event_loop().time()
-                # 清理失效/超时连接
-                async with self._conn_lock:
-                    valid_connections = []
-                    for conn, last_active in self._connections:
-                        if conn.is_closed or (current_time - last_active) > 600:  # 10分钟无活动清理
-                            logger.warning(f"清理失效/超时连接: {conn}")
-                            try:
-                                # 移除回调+关闭连接
-                                callback = getattr(
-                                    conn, '_pool_close_callback', None)
-                                if callback:
-                                    conn.close_callbacks.discard(callback)
-                                await conn.close()
-                            except:
-                                pass
-                        else:
-                            valid_connections.append((conn, last_active))
-                    self._connections = valid_connections
-                # 补充缺失的连接(不超过连接池最大限制)
-                missing_conn_count = self.connection_pool_size - \
-                    len(self._connections)
-                if missing_conn_count > 0:
-                    logger.info(f"连接池缺少{missing_conn_count}个连接,尝试补充")
-                    # 逐个补充,避免同时创建大量连接
-                    for _ in range(missing_conn_count):
-                        asyncio.create_task(self._recreate_connection())
-            except Exception as e:
-                logger.error(f"连接监控任务异常: {str(e)}", exc_info=True)
-                # 异常后仍保持15秒间隔
-                await asyncio.sleep(self.reconnect_interval)
+            logger.error(f"重连失败: {str(e)}", exc_info=True)
+            self._initialized = False  # 重连失败后标记未初始化
+            return False
+        finally:
+            self._reconnecting = False

-    async def
-        """
-        if not self.
-        async with self.
-            #
-            chan
-            #
-            try:
-                chan = await conn.channel()
-                self._used_channels.add((chan, conn))
-                logger.info(
-                    f"创建新通道,当前通道数: {len(self._used_channels)}/{self.channel_pool_size}")
-                return chan, conn
-            except Exception as e:
-                logger.warning(f"使用连接创建通道失败: {str(e)}")
-        # 无有效连接,尝试创建新连接
+    async def _clean_invalid_channels(self):
+        """清理失效通道并补充(仅针对当前唯一连接)"""
+        if not self._connection:
+            return
+
+        async with self._lock:
+            # 1. 清理空闲通道中的失效通道
+            valid_free = [
+                chan for chan in self._free_channels if not chan.is_closed]
+            invalid_count = len(self._free_channels) - len(valid_free)
+            if invalid_count > 0:
+                logger.warning(f"清理{invalid_count}个失效空闲通道")
+            self._free_channels = valid_free
+
+            # 2. 清理使用中通道中的失效通道
+            valid_used = {
+                chan for chan in self._used_channels if not chan.is_closed}
+            invalid_used_count = len(self._used_channels) - len(valid_used)
+            if invalid_used_count > 0:
+                logger.warning(f"清理{invalid_used_count}个失效使用中通道")
+            self._used_channels = valid_used
+
+            # 3. 检查连接是否有效,无效则触发重连
+            if self._connection.is_closed:
+                await self._reconnect_if_needed()
+                return
+
+            # 4. 补充通道到指定大小(仅使用当前唯一连接创建)
+            total_valid = len(self._free_channels) + len(self._used_channels)
+            missing = self.channel_pool_size - total_valid
+            if missing > 0:
+                logger.info(f"通道池缺少{missing}个通道,补充中...")
+                for _ in range(missing):
                     try:
-                        self.
-                        chan = await conn.channel()
-                        self._used_channels.add((chan, conn))
-                        return chan, conn
+                        channel = await self._connection.channel()
+                        await channel.set_qos(prefetch_count=self.prefetch_count)
+                        self._free_channels.append(channel)
                     except Exception as e:
-                        logger.error(f"
+                        logger.error(f"补充通道失败: {str(e)}", exc_info=True)
+                        break
+
+    async def init_pools(self):
+        """初始化:创建唯一连接 + 初始化通道池(仅执行一次)"""
+        if self._initialized:
+            logger.warning("通道池已初始化,无需重复调用")
+            return
+
+        if self._is_shutdown:
+            raise RuntimeError("通道池已关闭,无法初始化")
+
+        try:
+            # 1. 创建唯一连接
+            self._connection = await self._create_single_connection()
+            # 2. 初始化通道池(绑定到该连接)
+            await self._init_channel_pool()
+            self._initialized = True
+            logger.info("RabbitMQ单连接通道池初始化完成")
+        except Exception as e:
+            logger.error(f"初始化失败: {str(e)}", exc_info=True)
+            await self._safe_close_resources()
+            raise
+
+    async def acquire_channel(self) -> Tuple[Channel, AbstractRobustConnection]:
+        """获取通道(返回元组:(通道, 唯一连接),兼容上层代码)"""
+        if not self._initialized:
+            raise RuntimeError("通道池未初始化,请先调用init_pools()")
+
+        if self._is_shutdown:
+            raise RuntimeError("通道池已关闭,无法获取通道")
+
+        # 先清理失效通道,确保池内通道有效
+        await self._clean_invalid_channels()
+
+        async with self._lock:
+            # 优先从空闲池获取
+            if self._free_channels:
+                channel = self._free_channels.pop()
+                self._used_channels.add(channel)
+                # 返回(通道, 唯一连接)元组
+                return channel, self._connection
+
+            # 通道池已满,创建临时通道(超出池大小,用完关闭)
+            try:
+                if not self._connection or self._connection.is_closed:
+                    raise RuntimeError("唯一连接已失效,无法创建临时通道")
+
+                channel = await self._connection.channel()
+                await channel.set_qos(prefetch_count=self.prefetch_count)
+                self._used_channels.add(channel)
+                logger.warning(
+                    f"通道池已达上限({self.channel_pool_size}),创建临时通道(用完自动关闭)"
+                )
+                # 返回(通道, 唯一连接)元组
+                return channel, self._connection
+            except Exception as e:
+                logger.error(f"获取通道失败: {str(e)}", exc_info=True)
+                raise

     async def release_channel(self, channel: Channel, conn: AbstractRobustConnection):
-        """
-        if key in self._used_channels:
-            self._used_channels.remove(key)
-            # 通道和连接都有效才归还
-            if not conn.is_closed and not channel.is_closed:
-                self._free_channels.append(key)
-            else:
-                logger.warning("释放无效通道,已自动丢弃")
+        """释放通道(接收通道和连接参数,兼容上层代码)"""
+        if not channel or not conn or self._is_shutdown:
+            return

-        self._initialized = False
-        # 停止监控任务
-        if self._reconnect_task and not self._reconnect_task.done():
-            self._reconnect_task.cancel()
+        # 仅处理当前唯一连接的通道(避免无效连接的通道)
+        if conn != self._connection:
             try:
-                await
+                await channel.close()
+                logger.warning("已关闭非当前连接的通道(可能是重连后的旧通道)")
+            except Exception as e:
+                logger.warning(f"关闭非当前连接通道失败: {str(e)}")
+            return
+
+        async with self._lock:
+            if channel not in self._used_channels:
+                return

+            self._used_channels.remove(channel)
+
+            # 仅归还:当前连接有效 + 通道未关闭 + 池未满
+            if (not self._connection.is_closed
+                    and not channel.is_closed
+                    and len(self._free_channels) < self.channel_pool_size):
+                self._free_channels.append(channel)
+            else:
+                # 无效通道直接关闭
                 try:
-                    await channel.close()
+                    await channel.close()
                 except Exception as e:
                     logger.warning(f"关闭通道失败: {str(e)}")
-        self._free_channels.clear()
-        self._used_channels.clear()

+    async def declare_queue(self, queue_name: str, **kwargs) -> AbstractQueue:
+        """声明队列(使用池内通道,共享唯一连接)"""
+        channel, conn = await self.acquire_channel()
+        try:
+            return await channel.declare_queue(queue_name, **kwargs)
+        finally:
+            await self.release_channel(channel, conn)
+
+    async def declare_exchange(self, exchange_name: str, exchange_type: str = "direct", **kwargs) -> AbstractExchange:
+        """声明交换机(使用池内通道,共享唯一连接)"""
+        channel, conn = await self.acquire_channel()
+        try:
+            return await channel.declare_exchange(exchange_name, exchange_type, **kwargs)
+        finally:
+            await self.release_channel(channel, conn)
+
+    async def publish_message(self, routing_key: str, message_body: bytes, exchange_name: str = "", **kwargs):
+        """发布消息(使用池内通道,共享唯一连接)"""
+        channel, conn = await self.acquire_channel()
+        try:
+            exchange = channel.default_exchange if not exchange_name else await channel.get_exchange(exchange_name)
+            message = Message(body=message_body, **kwargs)
+            await exchange.publish(message, routing_key=routing_key)
+            logger.debug(
+                f"消息发布成功 - 节点: {self._current_host}, 交换机: {exchange.name}, 路由键: {routing_key}"
+            )
+        except Exception as e:
+            logger.error(f"发布消息失败: {str(e)}", exc_info=True)
+            raise
+        finally:
+            await self.release_channel(channel, conn)
+
+    async def consume_queue(self, queue_name: str, callback, auto_ack: bool = False, **kwargs):
+        """消费队列(使用池内通道,共享唯一连接)"""
+        if not self._initialized:
+            raise RuntimeError("通道池未初始化,请先调用init_pools()")
+
+        queue = await self.declare_queue(queue_name, **kwargs)
+        current_channel, current_conn = await self.acquire_channel()  # 元组解包
+
+        async def consume_callback_wrapper(message: AbstractMessage):
+            """消费回调包装(处理通道失效重连)"""
+            nonlocal current_channel, current_conn
+            try:
+                # 检查通道是否有效(连接可能已切换)
+                if (current_channel.is_closed
+                        or current_conn.is_closed
+                        or current_conn != self._connection):
+                    logger.warning("消费通道失效,重新获取通道...")
+                    await self.release_channel(current_channel, current_conn)
+                    current_channel, current_conn = await self.acquire_channel()
+                    return
+
+                await callback(message)
+                if not auto_ack:
+                    await message.ack()
+            except Exception as e:
+                logger.error(f"消费消息失败: {str(e)}", exc_info=True)
+                if not auto_ack:
+                    await message.nack(requeue=True)
+
+        logger.info(f"开始消费队列: {queue_name}(连接节点: {self._current_host})")
+        try:
+            async with queue.iterator() as queue_iter:
+                async for message in queue_iter:
+                    if self._is_shutdown:
+                        logger.info("消费已停止,退出消费循环")
+                        break
+                    await consume_callback_wrapper(message)
+        finally:
+            await self.release_channel(current_channel, current_conn)
+
+    async def close(self):
+        """关闭通道池:释放所有通道 + 关闭唯一连接"""
+        if self._is_shutdown:
+            logger.warning("通道池已关闭,无需重复操作")
+            return
+
+        self._is_shutdown = True
+        logger.info("开始关闭RabbitMQ单连接通道池...")
+
+        # 安全释放所有资源
+        await self._safe_close_resources()

-        logger.info("RabbitMQ
+        logger.info("RabbitMQ单连接通道池已完全关闭")
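Taken together, the rewritten pool exposes a small async API around one shared connection: init_pools() opens the single connection and pre-creates channel_pool_size channels, acquire_channel() hands back a (channel, connection) tuple, and close() tears everything down. A minimal usage sketch based only on the signatures added in this diff; the host list, credentials and queue name are placeholders, not values from the package:

import asyncio

from sycommon.rabbitmq.rabbitmq_pool import RabbitMQConnectionPool


async def main():
    # placeholder wiring; real values normally come from service configuration
    pool = RabbitMQConnectionPool(
        hosts=["10.0.0.1", "10.0.0.2"],
        port=5672,
        username="guest",
        password="guest",
        channel_pool_size=1,   # new default: one channel on the single connection
        prefetch_count=2,
    )
    await pool.init_pools()
    try:
        await pool.declare_queue("demo-queue", durable=True)
        await pool.publish_message("demo-queue", b"hello")  # default exchange
    finally:
        await pool.close()


asyncio.run(main())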
sycommon/rabbitmq/rabbitmq_service.py
CHANGED

@@ -118,10 +118,6 @@ class RabbitMQService:
             username=cls._config.get('username', ""),
             password=cls._config.get('password', ""),
             virtualhost=cls._config.get('virtual-host', "/"),
-            connection_pool_size=cls._config.get(
-                'connection_pool_size', 2),
-            channel_pool_size=cls._config.get('channel_pool_size', 5),
-            heartbeat=cls._config.get('heartbeat', 30),
             app_name=cls._config.get("APP_NAME", "")
         )

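With connection_pool_size, channel_pool_size and heartbeat no longer passed, the service now relies on the pool's new defaults (a single connection, channel_pool_size=1, heartbeat=30). Roughly, the effective call reduces to the sketch below; the hosts/port arguments sit outside this hunk and are assumed here:

# sketch of the effective constructor call after this change
pool = RabbitMQConnectionPool(
    hosts=hosts,  # parsed from config earlier (not shown in this hunk)
    port=port,
    username=cls._config.get('username', ""),
    password=cls._config.get('password', ""),
    virtualhost=cls._config.get('virtual-host', "/"),
    app_name=cls._config.get("APP_NAME", ""),
)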
sycommon/services.py
CHANGED

@@ -71,7 +71,8 @@ class Services(metaclass=SingletonMeta):
         app.state.config = {
             "host": cls._config.get('Host', '0.0.0.0'),
             "port": cls._config.get('Port', 8080),
-            "workers": cls._config.get('Workers', 1)
+            "workers": cls._config.get('Workers', 1),
+            "h11_max_incomplete_event_size": cls._config.get('H11MaxIncompleteEventSize', 1024 * 1024 * 1024)
         }

         # 立即配置非异步服务(在应用启动前)
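The new h11_max_incomplete_event_size entry raises the cap on a single incomplete HTTP/1.1 event (in practice, the largest request-header block h11 will buffer) to 1 GiB unless overridden in config. Assuming app.state.config is later handed to uvicorn, the value would be wired through roughly as below; the uvicorn startup call and the "main:app" import string are assumptions, not part of this diff:

import uvicorn

# hypothetical startup consuming app.state.config
cfg = app.state.config
uvicorn.run(
    "main:app",                 # assumed import string
    host=cfg["host"],
    port=cfg["port"],
    workers=cfg["workers"],
    h11_max_incomplete_event_size=cfg["h11_max_incomplete_event_size"],
)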
sycommon/synacos/feign.py
CHANGED

@@ -97,7 +97,7 @@ async def feign(service_name, api_path, method='GET', params=None, headers=None,
                     data=data,
                     timeout=timeout
                 ) as response:
-                    return await _handle_feign_response(response)
+                    return await _handle_feign_response(response, service_name, api_path)
             else:
                 # 普通JSON请求
                 async with session.request(
@@ -108,21 +108,21 @@ async def feign(service_name, api_path, method='GET', params=None, headers=None,
                     json=body,
                     timeout=timeout
                 ) as response:
-                    return await _handle_feign_response(response)
+                    return await _handle_feign_response(response, service_name, api_path)
     except aiohttp.ClientError as e:
         SYLogger.error(
-            f"nacos:请求服务接口时出错ClientError path: {api_path} error:{e}")
+            f"nacos:请求服务接口时出错ClientError server: {service_name} path: {api_path} error:{e}")
         return None
     except Exception as e:
         import traceback
         SYLogger.error(
-            f"nacos:请求服务接口时出错 path: {api_path} error:{traceback.format_exc()}")
+            f"nacos:请求服务接口时出错 server: {service_name} path: {api_path} error:{traceback.format_exc()}")
         return None
     finally:
         await session.close()


-async def _handle_feign_response(response):
+async def _handle_feign_response(response, service_name: str, api_path: str):
     """
     处理Feign请求的响应,统一返回格式
     调整逻辑:先判断状态码,再处理内容
@@ -160,9 +160,9 @@ async def _handle_feign_response(response):
             response_body = await response.text(encoding='utf-8', errors='ignore')
         except Exception:
             binary_data = await response.read()
-            response_body = f"非200状态,响应无法解码:{binary_data[:100].hex()}"
+            response_body = f"非200状态,响应无法解码:{binary_data[:100].hex()} server: {service_name} path: {api_path}"

-        error_msg = f"请求失败,状态码: {status_code},响应内容: {str(response_body)[:500]}"
+        error_msg = f"请求失败,状态码: {status_code},响应内容: {str(response_body)[:500]} server: {service_name} path: {api_path}"
         SYLogger.error(error_msg)
         return {
             "success": False,
@@ -174,5 +174,6 @@ async def _handle_feign_response(response):
     except Exception as e:
         import traceback
         error_detail = f"处理响应异常: {str(e)}\n{traceback.format_exc()}"
-        SYLogger.error(
+        SYLogger.error(
+            f"nacos:处理响应时出错: {error_detail} server: {service_name} path: {api_path}")
         return None
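All of these changes thread service_name and api_path through to the error logs, so a failing downstream call can be traced to a concrete service and endpoint instead of just a path. A small hedged example of the calling side; the service name, path and header below are placeholders:

# hypothetical caller: on any error feign() logs "server: ... path: ..." and returns None
async def lookup_user():
    result = await feign(
        "user-service",          # placeholder service name
        "/api/v1/users/42",      # placeholder path
        method="GET",
        headers={"X-Request-Id": "demo"},
    )
    return result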
sycommon/synacos/nacos_service.py
CHANGED

@@ -10,7 +10,6 @@ import yaml
 import time
 import atexit
 import random
-from concurrent.futures import ThreadPoolExecutor

 from sycommon.config.Config import SingletonMeta
 from sycommon.logging.kafka_log import SYLogger
@@ -34,22 +33,17 @@ class NacosService(metaclass=SingletonMeta):
         # 添加可重入锁用于状态同步
         self._state_lock = threading.RLock()

-        # 优化线程池配置,增加工作线程数量
-        self._executor = ThreadPoolExecutor(max_workers=1)  # 主线程池
-        self._heartbeat_executor = ThreadPoolExecutor(
-            max_workers=1)  # 增加心跳线程
-        self._monitor_executor = ThreadPoolExecutor(
-            max_workers=1)  # 增加监控线程
-
         # 配置参数
         self.max_retries = self.nacos_config.get('maxRetries', 5)
         self.retry_delay = self.nacos_config.get('retryDelay', 1)
-        self.retry_backoff = self.nacos_config.get('retryBackoff', 1.5)
         self.max_retry_delay = self.nacos_config.get('maxRetryDelay', 30)
+        # 心跳间隔:优先从配置读取,默认15秒(可通过配置修改)
         self.heartbeat_interval = self.nacos_config.get(
             'heartbeatInterval', 15)
+        # 心跳超时:固定设置为10秒(需求指定)
+        self.heartbeat_timeout = 15
         self.register_retry_interval = self.nacos_config.get(
-            'registerRetryInterval',
+            'registerRetryInterval', 15)  # 注册重试间隔

         # 长期重试配置
         self.long_term_retry_delay = self.nacos_config.get(
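The knobs read in this block now come straight from nacos_config: retryBackoff is gone, the heartbeat interval stays configurable, and the heartbeat timeout is pinned in code to 15 seconds. A sketch of a matching configuration section; the key names are the ones read above, the values are only illustrative:

# illustrative nacos_config values for the keys read in __init__
nacos_config = {
    "maxRetries": 5,
    "retryDelay": 1,               # seconds between retries, capped by maxRetryDelay
    "maxRetryDelay": 30,
    "heartbeatInterval": 15,       # seconds between heartbeats (configurable)
    "registerRetryInterval": 15,   # seconds between registration retries
}
# heartbeat_timeout is not configurable: __init__ hard-codes self.heartbeat_timeout = 15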
@@ -129,8 +123,7 @@ class NacosService(metaclass=SingletonMeta):
                 self._client_initialized = True
                 return True
             except Exception as e:
-                delay = min(self.retry_delay
-                            attempt), self.max_retry_delay)
+                delay = min(self.retry_delay, self.max_retry_delay)
                 SYLogger.error(
                     f"nacos:客户端初始化失败 (尝试 {attempt+1}/{self.max_retries}): {e}")
                 time.sleep(delay)
@@ -193,8 +186,7 @@ class NacosService(metaclass=SingletonMeta):

             except Exception as e:
                 attempt += 1
-                delay = min(self.retry_delay
-                            (attempt - 1)), self.max_retry_delay)
+                delay = min(self.retry_delay, self.max_retry_delay)

                 SYLogger.error(
                     f"nacos:客户端初始化失败 (尝试 {attempt}/{max_attempts}): {e}")
@@ -342,8 +334,7 @@ class NacosService(metaclass=SingletonMeta):
             except Exception as e:
                 last_error = str(e)
                 retry_count += 1
-                delay = min(self.register_retry_interval
-                            (self.retry_backoff ** (retry_count - 1)), self.max_retry_delay)
+                delay = min(self.register_retry_interval, self.max_retry_delay)

                 SYLogger.warning(
                     f"nacos:服务注册尝试 {retry_count} 失败: {last_error},{delay}秒后重试")
@@ -429,8 +420,13 @@ class NacosService(metaclass=SingletonMeta):
         timeout = 60  # 60秒超时
         start_time = time.time()

-        #
+        # 启动注册线程,不阻塞主线程(替换原线程池)
+        register_thread = threading.Thread(
+            target=instance.register_with_retry,
+            daemon=True,
+            name="NacosRegisterThread"
+        )
+        register_thread.start()

         # 等待注册完成或超时
         while True:
@@ -461,7 +457,7 @@ class NacosService(metaclass=SingletonMeta):

             # 启动连接监控线程
             threading.Thread(target=instance.monitor_connection,
-                             daemon=True).start()
+                             daemon=True, name="NacosConnectionMonitorThread").start()
         else:
             SYLogger.info("nacos:本地开发模式,跳过服务注册流程")

@@ -485,7 +481,11 @@ class NacosService(metaclass=SingletonMeta):
         )
         self._heartbeat_thread.daemon = True
         self._heartbeat_thread.start()
-        SYLogger.info(
+        SYLogger.info(
+            f"nacos:心跳线程启动,线程ID: {self._heartbeat_thread.ident},"
+            f"心跳间隔: {self.heartbeat_interval}秒,"
+            f"心跳超时: {self.heartbeat_timeout}秒"
+        )

     def _send_heartbeat_loop(self):
         """优化后的心跳发送循环,确保严格按间隔执行"""
@@ -493,7 +493,8 @@ class NacosService(metaclass=SingletonMeta):
         thread_ident = current_thread.ident
         SYLogger.info(
             f"nacos:心跳循环启动 - 线程ID: {thread_ident}, "
-            f"配置间隔: {self.heartbeat_interval}
+            f"配置间隔: {self.heartbeat_interval}秒, "
+            f"超时时间: {self.heartbeat_timeout}秒"
         )

         consecutive_fail = 0  # 连续失败计数器
@@ -512,7 +513,7 @@ class NacosService(metaclass=SingletonMeta):
                         f"nacos:服务未注册,跳过心跳 - 线程ID: {thread_ident}")
                     consecutive_fail = 0
                 else:
-                    #
+                    # 发送心跳(10秒超时)
                     success = self.send_heartbeat()
                     if success:
                         consecutive_fail = 0
@@ -545,26 +546,46 @@ class NacosService(metaclass=SingletonMeta):
         SYLogger.info(f"nacos:心跳循环已停止 - 线程ID: {thread_ident}")

     def send_heartbeat(self):
-        """
+        """发送心跳并添加10秒超时控制(替换线程池实现)"""
         if not self.ensure_client_connected():
             SYLogger.warning("nacos:客户端未连接,心跳发送失败")
             return False

+        # 用线程+join实现10秒超时控制
+        result_list = []  # 用于线程间传递结果
+
+        def heartbeat_task():
+            """心跳实际执行任务"""
+            try:
+                result = self._send_heartbeat_internal()
+                result_list.append(result)
+            except Exception as e:
+                SYLogger.error(f"nacos:心跳任务执行异常: {e}")
+                result_list.append(False)
+
+        # 启动心跳任务线程
+        task_thread = threading.Thread(
+            target=heartbeat_task,
+            daemon=True,
+            name="NacosHeartbeatTaskThread"
+        )
+        task_thread.start()
+
+        # 等待线程完成,最多等待10秒
+        task_thread.join(timeout=self.heartbeat_timeout)
+
+        # 处理结果
+        if not result_list:
+            # 超时未返回
+            SYLogger.error(f"nacos:心跳发送超时({self.heartbeat_timeout}秒)")
             self._client_initialized = False  # 强制重连
             return False

+        # 检查心跳结果
+        if result_list[0]:
+            self._last_successful_heartbeat = time.time()
+        return result_list[0]
+
     def _send_heartbeat_internal(self):
         """实际的心跳发送逻辑"""
         result = self.nacos_client.send_heartbeat(
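send_heartbeat now implements its timeout with a plain worker thread plus join(timeout=...): the worker appends its return value to a shared list, and an empty list after the join means the internal call overran heartbeat_timeout. A generic, self-contained sketch of that pattern; slow_call is a stand-in, not a function from this package:

import threading
import time


def run_with_timeout(fn, timeout: float):
    """Run fn() in a daemon thread; return (finished, result)."""
    results = []  # the worker appends its result here

    def worker():
        try:
            results.append(fn())
        except Exception:
            results.append(False)

    t = threading.Thread(target=worker, daemon=True)
    t.start()
    t.join(timeout=timeout)      # wait at most `timeout` seconds
    if not results:
        return False, None       # timed out; the daemon thread is abandoned
    return True, results[0]


def slow_call():                  # stand-in for _send_heartbeat_internal()
    time.sleep(1)
    return True


finished, result = run_with_timeout(slow_call, timeout=15)
print(finished, result)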
@@ -620,9 +641,13 @@ class NacosService(metaclass=SingletonMeta):
                 else:
                     self.registered = False
                     SYLogger.warning(f"nacos:服务实例未注册,尝试重新注册")
-                    #
-                    self.register_with_retry
+                    # 启动临时线程执行重新注册(替换原线程池)
+                    retry_thread = threading.Thread(
+                        target=self.register_with_retry,
+                        daemon=True,
+                        name="NacosRetryRegisterThread"
+                    )
+                    retry_thread.start()

                 # 20%的概率执行深度检查
                 if random.random() < 0.2:
@@ -673,13 +698,6 @@ class NacosService(metaclass=SingletonMeta):
             SYLogger.error(f"nacos:注销服务时发生错误: {e}")
         finally:
             self._shutdown_event.set()
-            # 优雅地关闭线程池
-            if self._executor and not self._executor._shutdown:
-                self._executor.shutdown(wait=True)
-            if self._heartbeat_executor and not self._heartbeat_executor._shutdown:
-                self._heartbeat_executor.shutdown(wait=True)
-            if self._monitor_executor and not self._monitor_executor._shutdown:
-                self._monitor_executor.shutdown(wait=True)

     def handle_signal(self, signum, frame):
         """处理退出信号"""
@@ -751,7 +769,8 @@ class NacosService(metaclass=SingletonMeta):
                 for data_id, callback in list(self._config_listeners.items()):
                     new_config = self.get_config(data_id)
                     if new_config and new_config != self._config_cache.get(data_id):
+                        # 直接执行回调(替换原线程池,配置回调通常为轻量操作)
+                        callback(new_config)
                         self._config_cache[data_id] = new_config
             except Exception as e:
                 SYLogger.error(f"nacos:配置监视线程异常: {str(e)}")
{sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/METADATA
RENAMED

@@ -1,22 +1,22 @@
 Metadata-Version: 2.4
 Name: sycommon-python-lib
-Version: 0.1.43
+Version: 0.1.44
 Summary: Add your description here
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
-Requires-Dist: aio-pika>=9.5.
-Requires-Dist: aiohttp>=3.13.
+Requires-Dist: aio-pika>=9.5.8
+Requires-Dist: aiohttp>=3.13.2
 Requires-Dist: decorator>=5.2.1
-Requires-Dist: fastapi>=0.
-Requires-Dist: kafka-python>=2.2.
+Requires-Dist: fastapi>=0.121.2
+Requires-Dist: kafka-python>=2.2.16
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: mysql-connector-python>=9.5.0
 Requires-Dist: nacos-sdk-python>=2.0.9
-Requires-Dist: pydantic>=2.12.
+Requires-Dist: pydantic>=2.12.4
 Requires-Dist: python-dotenv>=1.2.1
 Requires-Dist: pyyaml>=6.0.3
 Requires-Dist: sqlalchemy>=2.0.44
-Requires-Dist: starlette>=0.
+Requires-Dist: starlette>=0.49.3
 Requires-Dist: uuid>=1.30
 Requires-Dist: uvicorn>=0.38.0

{sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/RECORD
RENAMED

@@ -1,6 +1,6 @@
 command/cli.py,sha256=bP2LCLkRvfETIwWkVD70q5xFxMI4D3BpH09Ws1f-ENc,5849
 sycommon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sycommon/services.py,sha256=
+sycommon/services.py,sha256=LHMRxxRvLAiekkVspaQClBgAR_jLKqiJpd5hqF74MIE,11369
 sycommon/config/Config.py,sha256=9yO5b8WfvEDvkyrGrlwrLFasgh_-MjcEvGF20Gz5Xo4,3041
 sycommon/config/DatabaseConfig.py,sha256=ILiUuYT9_xJZE2W-RYuC3JCt_YLKc1sbH13-MHIOPhg,804
 sycommon/config/EmbeddingConfig.py,sha256=gPKwiDYbeu1GpdIZXMmgqM7JqBIzCXi0yYuGRLZooMI,362
@@ -36,24 +36,24 @@ sycommon/models/mqmsg_model.py,sha256=cxn0M5b0utQK6crMYmL-1waeGYHvK3AlGaRy23clqT
 sycommon/models/mqsend_config.py,sha256=NQX9dc8PpuquMG36GCVhJe8omAW1KVXXqr6lSRU6D7I,268
 sycommon/models/sso_user.py,sha256=i1WAN6k5sPcPApQEdtjpWDy7VrzWLpOrOQewGLGoGIw,2702
 sycommon/rabbitmq/rabbitmq_client.py,sha256=aRS7sYN4RAJ210Bl-Bh0qR7mXpaag_5YygNXz6ryhtQ,19677
-sycommon/rabbitmq/rabbitmq_pool.py,sha256=
-sycommon/rabbitmq/rabbitmq_service.py,sha256=
+sycommon/rabbitmq/rabbitmq_pool.py,sha256=ckHEpvPXS4TdHRS_2eGyiQ7kej3P7Wnyk3nttAjk04o,17724
+sycommon/rabbitmq/rabbitmq_service.py,sha256=L7SKf8Ws2wZvF0YGU1ZUiqg3z_viSNmMR0Zz4_5jBSY,39775
 sycommon/sse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sycommon/sse/event.py,sha256=k_rBJy23R7crtzQeetT0Q73D8o5-5p-eESGSs_BPOj0,2797
 sycommon/sse/sse.py,sha256=__CfWEcYxOxQ-HpLor4LTZ5hLWqw9-2X7CngqbVHsfw,10128
 sycommon/synacos/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sycommon/synacos/example.py,sha256=61XL03tU8WTNOo3FUduf93F2fAwah1S0lbH1ufhRhRk,5739
 sycommon/synacos/example2.py,sha256=adUaru3Hy482KrOA17DfaC4nwvLj8etIDS_KrWLWmCU,4811
-sycommon/synacos/feign.py,sha256
+sycommon/synacos/feign.py,sha256=-2tuGCqoSM3ddSoSz7h1RJTB06hn8K26v_1ev4qLsTU,7728
 sycommon/synacos/feign_client.py,sha256=JxzxohrsscQNlAjRVo_3ZQrMQSfVHFOtRYyEnP6sDGk,15205
-sycommon/synacos/nacos_service.py,sha256=
+sycommon/synacos/nacos_service.py,sha256=Hxd3fQOR53iujUjCboJoQum5vAXxQ9sdc8YlDug1OF0,35092
 sycommon/synacos/param.py,sha256=KcfSkxnXOa0TGmCjY8hdzU9pzUsA8-4PeyBKWI2-568,1765
 sycommon/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sycommon/tools/docs.py,sha256=OPj2ETheuWjXLyaXtaZPbwmJKfJaYXV5s4XMVAUNrms,1607
 sycommon/tools/snowflake.py,sha256=DdEj3T5r5OEvikp3puxqmmmz6BrggxomoSlnsRFb5dM,1174
 sycommon/tools/timing.py,sha256=OiiE7P07lRoMzX9kzb8sZU9cDb0zNnqIlY5pWqHcnkY,2064
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
-sycommon_python_lib-0.1.
+sycommon_python_lib-0.1.44.dist-info/METADATA,sha256=e_42ggxA1f4lcE0zFt2uvEPbT_Q5pANx_Dpb6HFIrVQ,7037
+sycommon_python_lib-0.1.44.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+sycommon_python_lib-0.1.44.dist-info/entry_points.txt,sha256=q_h2nbvhhmdnsOUZEIwpuoDjaNfBF9XqppDEmQn9d_A,46
+sycommon_python_lib-0.1.44.dist-info/top_level.txt,sha256=98CJ-cyM2WIKxLz-Pf0AitWLhJyrfXvyY8slwjTXNuc,17
+sycommon_python_lib-0.1.44.dist-info/RECORD,,
{sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/WHEEL
RENAMED
File without changes

{sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/entry_points.txt
RENAMED
File without changes

{sycommon_python_lib-0.1.43.dist-info → sycommon_python_lib-0.1.44.dist-info}/top_level.txt
RENAMED
File without changes