jettask 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. jettask/__init__.py +60 -2
  2. jettask/cli.py +314 -228
  3. jettask/config/__init__.py +9 -1
  4. jettask/config/config.py +245 -0
  5. jettask/config/env_loader.py +381 -0
  6. jettask/config/lua_scripts.py +158 -0
  7. jettask/config/nacos_config.py +132 -5
  8. jettask/core/__init__.py +1 -1
  9. jettask/core/app.py +1573 -666
  10. jettask/core/app_importer.py +33 -16
  11. jettask/core/container.py +532 -0
  12. jettask/core/task.py +1 -4
  13. jettask/core/unified_manager_base.py +2 -2
  14. jettask/executor/__init__.py +38 -0
  15. jettask/executor/core.py +625 -0
  16. jettask/executor/executor.py +338 -0
  17. jettask/executor/orchestrator.py +290 -0
  18. jettask/executor/process_entry.py +638 -0
  19. jettask/executor/task_executor.py +317 -0
  20. jettask/messaging/__init__.py +68 -0
  21. jettask/messaging/event_pool.py +2188 -0
  22. jettask/messaging/reader.py +519 -0
  23. jettask/messaging/registry.py +266 -0
  24. jettask/messaging/scanner.py +369 -0
  25. jettask/messaging/sender.py +312 -0
  26. jettask/persistence/__init__.py +118 -0
  27. jettask/persistence/backlog_monitor.py +567 -0
  28. jettask/{backend/data_access.py → persistence/base.py} +58 -57
  29. jettask/persistence/consumer.py +315 -0
  30. jettask/{core → persistence}/db_manager.py +23 -22
  31. jettask/persistence/maintenance.py +81 -0
  32. jettask/persistence/message_consumer.py +259 -0
  33. jettask/{backend/namespace_data_access.py → persistence/namespace.py} +66 -98
  34. jettask/persistence/offline_recovery.py +196 -0
  35. jettask/persistence/queue_discovery.py +215 -0
  36. jettask/persistence/task_persistence.py +218 -0
  37. jettask/persistence/task_updater.py +583 -0
  38. jettask/scheduler/__init__.py +2 -2
  39. jettask/scheduler/loader.py +6 -5
  40. jettask/scheduler/run_scheduler.py +1 -1
  41. jettask/scheduler/scheduler.py +7 -7
  42. jettask/scheduler/{unified_scheduler_manager.py → scheduler_coordinator.py} +18 -13
  43. jettask/task/__init__.py +16 -0
  44. jettask/{router.py → task/router.py} +26 -8
  45. jettask/task/task_center/__init__.py +9 -0
  46. jettask/task/task_executor.py +318 -0
  47. jettask/task/task_registry.py +291 -0
  48. jettask/test_connection_monitor.py +73 -0
  49. jettask/utils/__init__.py +31 -1
  50. jettask/{monitor/run_backlog_collector.py → utils/backlog_collector.py} +1 -1
  51. jettask/utils/db_connector.py +1629 -0
  52. jettask/{db_init.py → utils/db_init.py} +1 -1
  53. jettask/utils/rate_limit/__init__.py +30 -0
  54. jettask/utils/rate_limit/concurrency_limiter.py +665 -0
  55. jettask/utils/rate_limit/config.py +145 -0
  56. jettask/utils/rate_limit/limiter.py +41 -0
  57. jettask/utils/rate_limit/manager.py +269 -0
  58. jettask/utils/rate_limit/qps_limiter.py +154 -0
  59. jettask/utils/rate_limit/task_limiter.py +384 -0
  60. jettask/utils/serializer.py +3 -0
  61. jettask/{monitor/stream_backlog_monitor.py → utils/stream_backlog.py} +14 -6
  62. jettask/utils/time_sync.py +173 -0
  63. jettask/webui/__init__.py +27 -0
  64. jettask/{api/v1 → webui/api}/alerts.py +1 -1
  65. jettask/{api/v1 → webui/api}/analytics.py +2 -2
  66. jettask/{api/v1 → webui/api}/namespaces.py +1 -1
  67. jettask/{api/v1 → webui/api}/overview.py +1 -1
  68. jettask/{api/v1 → webui/api}/queues.py +3 -3
  69. jettask/{api/v1 → webui/api}/scheduled.py +1 -1
  70. jettask/{api/v1 → webui/api}/settings.py +1 -1
  71. jettask/{api.py → webui/app.py} +253 -145
  72. jettask/webui/namespace_manager/__init__.py +10 -0
  73. jettask/{multi_namespace_consumer.py → webui/namespace_manager/multi.py} +69 -22
  74. jettask/{unified_consumer_manager.py → webui/namespace_manager/unified.py} +1 -1
  75. jettask/{run.py → webui/run.py} +2 -2
  76. jettask/{services → webui/services}/__init__.py +1 -3
  77. jettask/{services → webui/services}/overview_service.py +34 -16
  78. jettask/{services → webui/services}/queue_service.py +1 -1
  79. jettask/{backend → webui/services}/queue_stats_v2.py +1 -1
  80. jettask/{services → webui/services}/settings_service.py +1 -1
  81. jettask/worker/__init__.py +53 -0
  82. jettask/worker/lifecycle.py +1507 -0
  83. jettask/worker/manager.py +583 -0
  84. jettask/{core/offline_worker_recovery.py → worker/recovery.py} +268 -175
  85. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/METADATA +2 -71
  86. jettask-0.2.20.dist-info/RECORD +145 -0
  87. jettask/__main__.py +0 -140
  88. jettask/api/__init__.py +0 -103
  89. jettask/backend/__init__.py +0 -1
  90. jettask/backend/api/__init__.py +0 -3
  91. jettask/backend/api/v1/__init__.py +0 -17
  92. jettask/backend/api/v1/monitoring.py +0 -431
  93. jettask/backend/api/v1/namespaces.py +0 -504
  94. jettask/backend/api/v1/queues.py +0 -342
  95. jettask/backend/api/v1/tasks.py +0 -367
  96. jettask/backend/core/__init__.py +0 -3
  97. jettask/backend/core/cache.py +0 -221
  98. jettask/backend/core/database.py +0 -200
  99. jettask/backend/core/exceptions.py +0 -102
  100. jettask/backend/dependencies.py +0 -261
  101. jettask/backend/init_meta_db.py +0 -158
  102. jettask/backend/main.py +0 -1426
  103. jettask/backend/main_unified.py +0 -78
  104. jettask/backend/main_v2.py +0 -394
  105. jettask/backend/models/__init__.py +0 -3
  106. jettask/backend/models/requests.py +0 -236
  107. jettask/backend/models/responses.py +0 -230
  108. jettask/backend/namespace_api_old.py +0 -267
  109. jettask/backend/services/__init__.py +0 -3
  110. jettask/backend/start.py +0 -42
  111. jettask/backend/unified_api_router.py +0 -1541
  112. jettask/cleanup_deprecated_tables.sql +0 -16
  113. jettask/core/consumer_manager.py +0 -1695
  114. jettask/core/delay_scanner.py +0 -256
  115. jettask/core/event_pool.py +0 -1700
  116. jettask/core/heartbeat_process.py +0 -222
  117. jettask/core/task_batch.py +0 -153
  118. jettask/core/worker_scanner.py +0 -271
  119. jettask/executors/__init__.py +0 -5
  120. jettask/executors/asyncio.py +0 -876
  121. jettask/executors/base.py +0 -30
  122. jettask/executors/common.py +0 -148
  123. jettask/executors/multi_asyncio.py +0 -309
  124. jettask/gradio_app.py +0 -570
  125. jettask/integrated_gradio_app.py +0 -1088
  126. jettask/main.py +0 -0
  127. jettask/monitoring/__init__.py +0 -3
  128. jettask/pg_consumer.py +0 -1896
  129. jettask/run_monitor.py +0 -22
  130. jettask/run_webui.py +0 -148
  131. jettask/scheduler/multi_namespace_scheduler.py +0 -294
  132. jettask/scheduler/unified_manager.py +0 -450
  133. jettask/task_center_client.py +0 -150
  134. jettask/utils/serializer_optimized.py +0 -33
  135. jettask/webui_exceptions.py +0 -67
  136. jettask-0.2.18.dist-info/RECORD +0 -150
  137. /jettask/{constants.py → config/constants.py} +0 -0
  138. /jettask/{backend/config.py → config/task_center.py} +0 -0
  139. /jettask/{pg_consumer → messaging/pg_consumer}/pg_consumer_v2.py +0 -0
  140. /jettask/{pg_consumer → messaging/pg_consumer}/sql/add_execution_time_field.sql +0 -0
  141. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_new_tables.sql +0 -0
  142. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_tables_v3.sql +0 -0
  143. /jettask/{pg_consumer → messaging/pg_consumer}/sql/migrate_to_new_structure.sql +0 -0
  144. /jettask/{pg_consumer → messaging/pg_consumer}/sql/modify_time_fields.sql +0 -0
  145. /jettask/{pg_consumer → messaging/pg_consumer}/sql_utils.py +0 -0
  146. /jettask/{models.py → persistence/models.py} +0 -0
  147. /jettask/scheduler/{manager.py → task_crud.py} +0 -0
  148. /jettask/{schema.sql → schemas/schema.sql} +0 -0
  149. /jettask/{task_center.py → task/task_center/client.py} +0 -0
  150. /jettask/{monitoring → utils}/file_watcher.py +0 -0
  151. /jettask/{services/redis_monitor_service.py → utils/redis_monitor.py} +0 -0
  152. /jettask/{api/v1 → webui/api}/__init__.py +0 -0
  153. /jettask/{webui_config.py → webui/config.py} +0 -0
  154. /jettask/{webui_models → webui/models}/__init__.py +0 -0
  155. /jettask/{webui_models → webui/models}/namespace.py +0 -0
  156. /jettask/{services → webui/services}/alert_service.py +0 -0
  157. /jettask/{services → webui/services}/analytics_service.py +0 -0
  158. /jettask/{services → webui/services}/scheduled_task_service.py +0 -0
  159. /jettask/{services → webui/services}/task_service.py +0 -0
  160. /jettask/{webui_sql → webui/sql}/batch_upsert_functions.sql +0 -0
  161. /jettask/{webui_sql → webui/sql}/verify_database.sql +0 -0
  162. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/WHEEL +0 -0
  163. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/entry_points.txt +0 -0
  164. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/licenses/LICENSE +0 -0
  165. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1629 @@
+ """
+ Database connection utilities.
+
+ Provides unified Redis and PostgreSQL connection management to avoid duplicated code.
+ All database connection pools are global singletons; pools are reused to save resources.
+
+ File layout:
+ ============================================================
+ 1. Imports and global configuration (lines 7-23)
+    - Third-party imports
+    - Logger initialization
+
+ 2. Global variables (lines 25-38)
+    - Redis connection pool caches (sync/async, text/binary)
+    - PostgreSQL engine and session factory caches
+
+ 3. Utility classes (lines 41-83)
+    - InfiniteRetry: infinite retry strategy
+
+ 4. Custom Redis connection pool implementations (lines 85-453)
+    - IdleTrackingBlockingConnectionPool: sync pool (with idle reclamation)
+    - AsyncIdleTrackingBlockingConnectionPool: async pool (with idle reclamation)
+
+ 5. Pool accessor functions (lines 455-740)
+    - get_sync_redis_pool: get a sync Redis connection pool
+    - get_async_redis_pool: get an async Redis connection pool
+    - get_async_redis_pool_for_pubsub: get a Pub/Sub-dedicated pool
+    - get_pg_engine_and_factory: get a PostgreSQL engine and session factory
+
+ 6. Configuration and connector classes (lines 742-1249)
+    - DBConfig: database configuration helper
+    - SyncRedisConnector: sync Redis connector
+    - RedisConnector: async Redis connector
+    - PostgreSQLConnector: PostgreSQL connector
+    - ConnectionManager: unified connection manager
+
+ 7. Global client instance management (lines 1251-1378)
+    - get_sync_redis_client: get the global sync Redis client
+    - get_async_redis_client: get the global async Redis client
+    - clear_all_cache: clear all caches
+
+ ============================================================
+ """
+
+ # ============================================================
+ # Section 1: Imports and global configuration
+ # ============================================================
+ import os
+ import logging
+ import traceback
+ import socket
+ import time
+ import threading
+ import asyncio
+ from typing import Optional, Dict, Any, Union
+ from contextlib import asynccontextmanager, contextmanager
+ import redis as sync_redis
+ import redis.asyncio as redis
+ from redis.asyncio import BlockingConnectionPool
+ from redis.backoff import ExponentialBackoff
+ from redis.retry import Retry
+ from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
+
+ logger = logging.getLogger(__name__)
+
+
+ # ============================================================
+ # Section 2: Global variables
+ # ============================================================
+
+ class _PoolRegistry:
+     """
+     Global connection pool registry (singleton).
+
+     Manages all database connection pools and client instances in one place
+     instead of scattering module-level globals.
+     """
+
+     # Redis connection pool caches
+     sync_redis_pools: Dict[str, sync_redis.ConnectionPool] = {}
+     sync_binary_redis_pools: Dict[str, sync_redis.ConnectionPool] = {}
+     async_redis_pools: Dict[str, redis.ConnectionPool] = {}
+     async_binary_redis_pools: Dict[str, redis.ConnectionPool] = {}
+
+     # PostgreSQL engine and session factory caches
+     pg_engines: Dict[str, Any] = {}
+     pg_session_factories: Dict[str, async_sessionmaker] = {}
+
+     # Redis client instance caches
+     sync_redis_clients: Dict[str, sync_redis.StrictRedis] = {}
+     sync_binary_redis_clients: Dict[str, sync_redis.StrictRedis] = {}
+     async_redis_clients: Dict[str, redis.StrictRedis] = {}
+     async_binary_redis_clients: Dict[str, redis.StrictRedis] = {}
+
+     @classmethod
+     def clear_all(cls):
+         """Clear all caches."""
+         cls.sync_redis_pools.clear()
+         cls.sync_binary_redis_pools.clear()
+         cls.async_redis_pools.clear()
+         cls.async_binary_redis_pools.clear()
+         cls.pg_engines.clear()
+         cls.pg_session_factories.clear()
+         cls.sync_redis_clients.clear()
+         cls.sync_binary_redis_clients.clear()
+         cls.async_redis_clients.clear()
+         cls.async_binary_redis_clients.clear()
+
+
+ # Backward compatibility: keep the old global names as references into _PoolRegistry
+ _sync_redis_pools = _PoolRegistry.sync_redis_pools
+ _sync_binary_redis_pools = _PoolRegistry.sync_binary_redis_pools
+ _async_redis_pools = _PoolRegistry.async_redis_pools
+ _async_binary_redis_pools = _PoolRegistry.async_binary_redis_pools
+ _pg_engines = _PoolRegistry.pg_engines
+ _pg_session_factories = _PoolRegistry.pg_session_factories
+
+
+ # ============================================================
+ # Section 3: Utility classes
+ # ============================================================
+
+ class InfiniteRetry(Retry):
+     """Retry strategy that never gives up."""
+
+     def __init__(self):
+         # Exponential backoff, capped at 30 seconds
+         super().__init__(
+             ExponentialBackoff(cap=30, base=1),
+             retries=-1  # -1 means retry forever
+         )
+
+     def call_with_retry(self, do, fail):
+         """
+         Run an operation, retrying forever on failure.
+
+         Args:
+             do: the function to execute
+             fail: callback invoked on each failure
+         """
+         failures = 0
+         backoff = self._backoff
+
+         while True:
+             try:
+                 return do()
+             except Exception as error:
+                 failures += 1
+
+                 # Log the first failure and then every 10th one
+                 if failures == 1 or failures % 10 == 0:
+                     logger.warning(
+                         f"Redis connection failed (attempt {failures}), retrying in {backoff.compute(failures)}s: {error}"
+                     )
+
+                 # Invoke the failure callback
+                 fail(error)
+
+                 # Wait, then retry
+                 time.sleep(backoff.compute(failures))
+
+                 # keep looping -- never give up
+
+
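Aside: to get a feel for the retry cadence InfiniteRetry produces, here is a minimal sketch using redis-py's public ExponentialBackoff API (illustration only, not part of the diff):

    from redis.backoff import ExponentialBackoff

    backoff = ExponentialBackoff(cap=30, base=1)
    for failures in range(1, 8):
        # the delay doubles per failure until it hits the 30s cap
        print(failures, backoff.compute(failures))

So a flapping Redis server is probed quickly at first, then at a steady 30-second rhythm for as long as it stays down.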
+ # ============================================================
+ # Section 4: Custom Redis connection pool implementations
+ # ============================================================
+
+ class IdleTrackingBlockingConnectionPool(sync_redis.BlockingConnectionPool):
+     """
+     Sync blocking connection pool with idle-connection tracking and automatic reclamation.
+
+     Core mechanism:
+     1. get_connection() records a timestamp when a connection is handed out
+     2. release() refreshes the connection's last-use timestamp
+     3. A background thread periodically closes connections idle longer than max_idle_time
+     """
+
+     def __init__(self, *args, max_idle_time: int = 300, idle_check_interval: int = 60, **kwargs):
+         """
+         Args:
+             max_idle_time: maximum idle time in seconds; connections idle longer are closed (default 300)
+             idle_check_interval: how often to check for idle connections, in seconds (default 60)
+         """
+         super().__init__(*args, **kwargs)
+
+         self.max_idle_time = max_idle_time
+         self.idle_check_interval = idle_check_interval
+
+         # Last-use timestamps per connection: {connection_id: last_use_timestamp}
+         self._connection_last_use: Dict[int, float] = {}
+         self._connection_last_use_lock = threading.RLock()
+
+         # Background thread that reclaims idle connections
+         self._cleaner_thread = None
+         self._stop_cleaner = threading.Event()
+
+         if max_idle_time > 0 and idle_check_interval > 0:
+             self._start_idle_cleaner()
+             # logger.info(f"Started sync idle-connection cleaner: max_idle_time={max_idle_time}s, check_interval={idle_check_interval}s")
+
+     def get_connection(self, command_name=None, *keys, **options):
+         """Record the acquisition time when handing out a connection."""
+         conn = super().get_connection(command_name, *keys, **options)
+         conn_id = id(conn)
+         with self._connection_last_use_lock:
+             if conn_id not in self._connection_last_use:
+                 self._connection_last_use[conn_id] = time.time()
+                 logger.debug(f"Connection {conn_id} acquired for the first time")
+         return conn
+
+     def release(self, connection):
+         """Refresh the last-use timestamp when a connection is released."""
+         conn_id = id(connection)
+         current_time = time.time()
+         with self._connection_last_use_lock:
+             self._connection_last_use[conn_id] = current_time
+             logger.debug(f"Connection {conn_id} released; last-use time updated to {current_time}")
+         super().release(connection)
+
+     def _start_idle_cleaner(self):
+         """Start the idle-connection cleaner thread."""
+         self._cleaner_thread = threading.Thread(
+             target=self._idle_cleaner_loop,
+             name="SyncIdleConnectionCleaner",
+             daemon=True
+         )
+         self._cleaner_thread.start()
+
+     def _idle_cleaner_loop(self):
+         """Main loop of the idle-connection cleaner thread."""
+         while not self._stop_cleaner.wait(self.idle_check_interval):
+             try:
+                 self._cleanup_idle_connections()
+             except Exception as e:
+                 logger.error(f"Error while cleaning idle connections: {e}")
+                 logger.debug(traceback.format_exc())
+
+     def _cleanup_idle_connections(self):
+         """Close connections that have been idle for too long."""
+         current_time = time.time()
+         connections_to_keep = []
+         connections_to_close = []
+
+         # Drain all available connections from the queue (non-blocking) and
+         # record the initial pool state
+         connections_to_check = []
+         with self._lock:
+             initial_total = len(self._connections)
+             initial_available = self.pool.qsize()
+             initial_in_use = initial_total - initial_available
+
+             while True:
+                 try:
+                     conn = self.pool.get_nowait()
+                     connections_to_check.append(conn)
+                 except Exception:
+                     break
+
+         available_count = len(connections_to_check)
+         logger.debug(f"Checking {available_count} available connections")
+
+         if available_count <= 2:
+             # Keep at least 2 connections; put everything back
+             with self._lock:
+                 for conn in connections_to_check:
+                     self.pool.put(conn)
+             logger.debug(f"Available connections {available_count} <= 2, skipping cleanup")
+             return
+
+         # Examine each connection
+         for conn in connections_to_check:
+             if conn is None:
+                 # None placeholder, put straight back
+                 connections_to_keep.append(conn)
+                 continue
+
+             conn_id = id(conn)
+             with self._connection_last_use_lock:
+                 last_use = self._connection_last_use.get(conn_id, current_time)
+             idle_time = current_time - last_use
+
+             if idle_time > self.max_idle_time and len(connections_to_keep) + len(connections_to_check) - len(connections_to_close) > 2:
+                 # Mark for closing (always retaining at least 2)
+                 connections_to_close.append((conn, conn_id, idle_time))
+             else:
+                 # Keep this connection
+                 connections_to_keep.append(conn)
+
+         # Close the idle connections
+         closed_count = 0
+         for conn, conn_id, idle_time in connections_to_close:
+             try:
+                 # 1. Remove from the tracking dict first (so other threads stop seeing it)
+                 with self._connection_last_use_lock:
+                     self._connection_last_use.pop(conn_id, None)
+
+                 # 2. Disconnect (this closes the socket)
+                 conn.disconnect()
+
+                 # 3. Remove from the connection list (must happen after disconnect)
+                 with self._lock:
+                     if conn in self._connections:
+                         self._connections.remove(conn)
+
+                 closed_count += 1
+                 logger.debug(f"Closed idle connection {conn_id}, idle for {idle_time:.1f}s")
+
+             except Exception as e:
+                 logger.warning(f"Failed to disconnect connection {conn_id}: {e}")
+                 # Put failed connections back too, so none are lost
+                 connections_to_keep.append(conn)
+                 # Restore tracking
+                 with self._connection_last_use_lock:
+                     self._connection_last_use[conn_id] = time.time()
+
+         # Return the kept connections to the queue
+         with self._lock:
+             for conn in connections_to_keep:
+                 self.pool.put(conn)
+
+         if closed_count > 0:
+             with self._lock:
+                 final_total = len(self._connections)
+                 final_available = self.pool.qsize()
+                 final_in_use = final_total - final_available
+             logger.info(
+                 f"Idle-connection cleanup done: before {initial_total} (available: {initial_available}, in use: {initial_in_use}), "
+                 f"closed {closed_count}, "
+                 f"remaining {final_total} (available: {final_available}, in use: {final_in_use})"
+             )
+
+     def _stop_idle_cleaner(self):
+         """Stop the idle-connection cleaner thread."""
+         if self._cleaner_thread and self._cleaner_thread.is_alive():
+             self._stop_cleaner.set()
+             self._cleaner_thread.join(timeout=5)
+             # logger.info("Sync idle-connection cleaner thread stopped")
+
+         # Clear the timestamp dict
+         with self._connection_last_use_lock:
+             self._connection_last_use.clear()
+
+     def disconnect(self, inuse_connections: bool = True):
+         """Disconnect all connections and stop the cleaner thread."""
+         self._stop_idle_cleaner()
+         super().disconnect(inuse_connections)
+
+
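Aside: the "close idle, but never drop below two" partitioning rule in _cleanup_idle_connections is easier to see in isolation. A self-contained sketch of the same logic (hypothetical helper, illustration only):

    import time
    from typing import Dict, List, Tuple

    def partition_idle(conns: List[int], last_use: Dict[int, float],
                       max_idle: float, floor: int = 2) -> Tuple[List[int], List[int]]:
        """Return (keep, close) given connection ids and last-use timestamps."""
        now = time.time()
        keep, close = [], []
        for c in conns:
            idle = now - last_use.get(c, now)  # unknown connections count as fresh
            # close only while more than `floor` connections would remain
            if idle > max_idle and len(conns) - len(close) > floor:
                close.append(c)
            else:
                keep.append(c)
        return keep, close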
+ class AsyncIdleTrackingBlockingConnectionPool(redis.BlockingConnectionPool):
+     """
+     Async blocking connection pool with idle-connection tracking and automatic reclamation.
+
+     Core mechanism:
+     1. get_connection() records a timestamp when a connection is handed out
+     2. release() refreshes the connection's last-use timestamp
+     3. An asyncio.Task periodically closes connections idle longer than max_idle_time
+     """
+
+     def __init__(self, *args, max_idle_time: int = 300, idle_check_interval: int = 60, **kwargs):
+         """
+         Args:
+             max_idle_time: maximum idle time in seconds; connections idle longer are closed (default 300)
+             idle_check_interval: how often to check for idle connections, in seconds (default 60)
+         """
+         # Drop the custom parameters so they are not passed to the parent class
+         kwargs.pop('max_idle_time', None)
+         kwargs.pop('idle_check_interval', None)
+
+         super().__init__(*args, **kwargs)
+
+         self.max_idle_time = max_idle_time
+         self.idle_check_interval = idle_check_interval
+
+         # Last-use timestamps per connection: {connection_id: last_use_timestamp}
+         self._connection_last_use: Dict[int, float] = {}
+         self._connection_last_use_lock = None  # created lazily; needs an event loop
+
+         # Idle-connection cleanup task
+         self._cleaner_task = None
+         self._stop_cleaner = None  # created lazily
+
+         # if max_idle_time > 0 and idle_check_interval > 0:
+         #     logger.info(f"Async idle-connection cleaner will start on first use: max_idle_time={max_idle_time}s, check_interval={idle_check_interval}s")
+
+     async def get_connection(self, command_name=None, *keys, **options):
+         """Record the acquisition time when handing out a connection."""
+         # Make sure the cleanup task is running
+         await self._ensure_cleaner_task_started()
+         conn = await super().get_connection(command_name, *keys, **options)
+         conn_id = id(conn)
+
+         # Lazily create the lock
+         if self._connection_last_use_lock is None:
+             self._connection_last_use_lock = asyncio.Lock()
+
+         async with self._connection_last_use_lock:
+             if conn_id not in self._connection_last_use:
+                 self._connection_last_use[conn_id] = time.time()
+                 logger.debug(f"Connection {conn_id} acquired for the first time")
+         return conn
+
+     async def release(self, connection):
+         """Refresh the last-use timestamp when a connection is released (async)."""
+         conn_id = id(connection)
+         current_time = time.time()
+
+         # Lazily create the lock
+         if self._connection_last_use_lock is None:
+             self._connection_last_use_lock = asyncio.Lock()
+
+         # Update the timestamp under the async lock
+         async with self._connection_last_use_lock:
+             self._connection_last_use[conn_id] = current_time
+             logger.debug(f"Connection {conn_id} released; last-use time updated to {current_time}")
+
+         await super().release(connection)
+
+     async def _ensure_cleaner_task_started(self):
+         """Start the cleanup task if it is not running yet."""
+         if self.max_idle_time > 0 and self.idle_check_interval > 0 and self._cleaner_task is None:
+             # Lazily create the stop event
+             if self._stop_cleaner is None:
+                 self._stop_cleaner = asyncio.Event()
+             self._cleaner_task = asyncio.create_task(self._idle_cleaner_loop())
+             # logger.info("Async idle-connection cleanup task started")
+
+     async def _idle_cleaner_loop(self):
+         """Main loop of the idle-connection cleanup task."""
+         while True:
+             try:
+                 # Wait for either the check interval or the stop signal
+                 await asyncio.wait_for(
+                     self._stop_cleaner.wait(),
+                     timeout=self.idle_check_interval
+                 )
+                 # Stop signal received, exit the loop
+                 break
+             except asyncio.TimeoutError:
+                 # Interval elapsed, run a cleanup pass
+                 try:
+                     await self._cleanup_idle_connections()
+                 except Exception as e:
+                     logger.error(f"Error while cleaning idle connections: {e}")
+                     logger.debug(traceback.format_exc())
+
+     async def _cleanup_idle_connections(self):
+         """Close connections that have been idle for too long."""
+         if self._connection_last_use_lock is None:
+             return
+
+         current_time = time.time()
+         connections_to_keep = []
+         connections_to_close = []
+
+         # Snapshot all available connections and record the initial pool state
+         async with self._lock:
+             if not hasattr(self, '_available_connections'):
+                 return
+             connections_to_check = list(self._available_connections)
+             initial_available = len(self._available_connections)
+             initial_in_use = len(self._in_use_connections) if hasattr(self, '_in_use_connections') else 0
+             initial_total = initial_available + initial_in_use
+
+         available_count = len(connections_to_check)
+         logger.debug(f"Checking {available_count} available connections")
+
+         if available_count <= 2:
+             logger.debug(f"Available connections {available_count} <= 2, skipping cleanup")
+             return
+
+         # Examine each connection
+         for conn in connections_to_check:
+             if conn is None:
+                 connections_to_keep.append(conn)
+                 continue
+
+             conn_id = id(conn)
+             async with self._connection_last_use_lock:
+                 last_use = self._connection_last_use.get(conn_id, current_time)
+             idle_time = current_time - last_use
+
+             if idle_time > self.max_idle_time and len(connections_to_keep) + len(connections_to_check) - len(connections_to_close) > 2:
+                 # Mark for closing (always retaining at least 2)
+                 connections_to_close.append((conn, conn_id, idle_time))
+             else:
+                 # Keep this connection
+                 connections_to_keep.append(conn)
+
+         # Close the idle connections
+         closed_count = 0
+         for conn, conn_id, idle_time in connections_to_close:
+             try:
+                 # 1. Remove from the tracking dict first (so other coroutines stop seeing it)
+                 async with self._connection_last_use_lock:
+                     self._connection_last_use.pop(conn_id, None)
+
+                 # 2. Disconnect (this closes the socket)
+                 await conn.disconnect()
+
+                 # 3. Remove from the connection set/list (must happen after disconnect)
+                 async with self._lock:
+                     if hasattr(self, '_available_connections') and conn in self._available_connections:
+                         self._available_connections.remove(conn)
+                     if hasattr(self, '_in_use_connections') and conn in self._in_use_connections:
+                         # _in_use_connections may be a set or a list; try both
+                         try:
+                             self._in_use_connections.discard(conn)
+                         except AttributeError:
+                             self._in_use_connections.remove(conn)
+
+                 closed_count += 1
+                 logger.debug(f"Closed idle connection {conn_id}, idle for {idle_time:.1f}s")
+
+             except Exception as e:
+                 logger.warning(f"Failed to disconnect connection {conn_id}: {e}")
+                 logger.debug(traceback.format_exc())
+                 # Restore tracking
+                 async with self._connection_last_use_lock:
+                     self._connection_last_use[conn_id] = time.time()
+
+         # if closed_count > 0:
+         #     async with self._lock:
+         #         final_available = len(self._available_connections) if hasattr(self, '_available_connections') else 0
+         #         final_in_use = len(self._in_use_connections) if hasattr(self, '_in_use_connections') else 0
+         #         final_total = final_available + final_in_use
+         #     logger.info(
+         #         f"Idle-connection cleanup done: before {initial_total}, "
+         #         f"closed {closed_count}, "
+         #         f"remaining {final_total} "
+         #         f"{len(self._connection_last_use)=}"
+         #     )
+
+     async def _stop_idle_cleaner(self):
+         """Stop the idle-connection cleanup task."""
+         if self._cleaner_task and not self._cleaner_task.done():
+             if self._stop_cleaner:
+                 self._stop_cleaner.set()
+             try:
+                 await asyncio.wait_for(self._cleaner_task, timeout=5)
+             except asyncio.TimeoutError:
+                 self._cleaner_task.cancel()
+             # logger.info("Async idle-connection cleanup task stopped")
+
+         # Clear the timestamp dict
+         if self._connection_last_use_lock:
+             async with self._connection_last_use_lock:
+                 self._connection_last_use.clear()
+         else:
+             self._connection_last_use.clear()
+
+     async def disconnect(self, inuse_connections: bool = True):
+         """Disconnect all connections and stop the cleanup task."""
+         await self._stop_idle_cleaner()
+         await super().disconnect(inuse_connections)
+
+
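Aside: the async pool creates its asyncio.Lock and asyncio.Event lazily because, as its comments note, they need an event loop; a pool constructed at import time would otherwise touch loop machinery too early. A minimal sketch of the same deferred-creation pattern (hypothetical class, illustration only):

    import asyncio
    from typing import Optional

    class LazyLoopState:
        """Create asyncio primitives on first use, inside a running loop."""

        def __init__(self):
            self._lock: Optional[asyncio.Lock] = None  # not yet bound to any loop

        async def locked_update(self, fn):
            if self._lock is None:  # first call is guaranteed to run inside the loop
                self._lock = asyncio.Lock()
            async with self._lock:
                fn()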
+ # ============================================================
+ # Section 5: Pool accessor functions
+ # ============================================================
+
+ def _get_socket_keepalive_options() -> Dict[int, int]:
+     """Build socket keepalive options (only the flags the platform provides, e.g. Linux)."""
+     socket_keepalive_options = {}
+     if hasattr(socket, 'TCP_KEEPIDLE'):
+         socket_keepalive_options[socket.TCP_KEEPIDLE] = 1
+     if hasattr(socket, 'TCP_KEEPINTVL'):
+         socket_keepalive_options[socket.TCP_KEEPINTVL] = 3
+     if hasattr(socket, 'TCP_KEEPCNT'):
+         socket_keepalive_options[socket.TCP_KEEPCNT] = 5
+     return socket_keepalive_options
+
+
+ def get_sync_redis_pool(
+     redis_url: str,
+     decode_responses: bool = True,
+     max_connections: int = 200,
+     socket_connect_timeout: int = 30,
+     socket_timeout: int = 60,
+     timeout: int = 60,
+     health_check_interval: int = 30,
+     max_idle_time: int = 120,
+     idle_check_interval: int = 10,
+     **pool_kwargs
+ ) -> IdleTrackingBlockingConnectionPool:
+     """
+     Get or create a sync Redis connection pool (global singleton, backed by the
+     custom IdleTrackingBlockingConnectionPool).
+
+     Pool tuning strategy:
+     1. BlockingConnectionPool: blocks when the pool is exhausted instead of leaking connections
+     2. health_check_interval: redis-py's built-in health check weeds out dead connections
+     3. TCP keepalive: OS-level probes detect broken connections
+     4. Infinite retry: reconnects automatically through network blips
+     5. Idle reclamation: tracks real last-use times and closes long-idle connections
+
+     Args:
+         redis_url: Redis connection URL
+         decode_responses: whether to decode responses to str
+         max_connections: maximum pool size (default 200)
+         socket_connect_timeout: socket connect timeout in seconds (default 30)
+         socket_timeout: socket read/write timeout in seconds (default 60)
+         timeout: how long to wait for a free connection, in seconds (default 60)
+         health_check_interval: health check interval in seconds (default 30)
+             - when a connection is checked out after being unused longer than this, a PING is sent first
+             - 0 disables the health check (not recommended)
+             - recommended 30-60s; must stay below the Redis server's `timeout` setting
+         max_idle_time: maximum idle time in seconds; idle connections are closed after this (default 120)
+             - 0 disables idle reclamation
+         idle_check_interval: idle check interval in seconds (default 10)
+         **pool_kwargs: extra pool parameters
+
+     Returns:
+         IdleTrackingBlockingConnectionPool: blocking pool with idle reclamation
+
+     Recommended configuration (coordinate with the Redis server settings):
+         redis.conf:
+             timeout 60           # server closes connections idle for 60s
+             tcp-keepalive 30     # keepalive probe every 30s
+
+         client:
+             health_check_interval=30   # < server timeout (60s)
+             socket_keepalive=True
+             TCP_KEEPIDLE=60            # < server timeout (60s)
+             max_idle_time=300          # reclaim connections after 5 idle minutes
+             idle_check_interval=60     # check once a minute
+     """
+     # Pick the pool cache for this decode mode
+     pool_cache = _sync_redis_pools if decode_responses else _sync_binary_redis_pools
+
+     # Cache key (currently just the URL; the decode mode is captured by the cache choice above)
+     cache_key = redis_url
+
+     if cache_key not in pool_cache:
+         socket_keepalive_options = _get_socket_keepalive_options()
+
+         # Infinite retry instance
+         infinite_retry = InfiniteRetry()
+
+         # Build the pool via IdleTrackingBlockingConnectionPool.from_url
+         pool = IdleTrackingBlockingConnectionPool.from_url(
+             redis_url,
+             decode_responses=decode_responses,
+             max_connections=max_connections,
+             timeout=timeout,  # BlockingConnectionPool-specific: wait time for a free connection
+             retry_on_timeout=True,
+             retry_on_error=[ConnectionError, TimeoutError, OSError, BrokenPipeError],
+             retry=infinite_retry,  # never give up
+             socket_keepalive=True,
+             socket_keepalive_options=socket_keepalive_options if socket_keepalive_options else None,
+             health_check_interval=health_check_interval,  # redis-py's built-in health check
+             socket_connect_timeout=socket_connect_timeout,
+             socket_timeout=socket_timeout,
+             max_idle_time=max_idle_time,  # idle reclamation settings
+             idle_check_interval=idle_check_interval,
+             **pool_kwargs
+         )
+
+         pool_cache[cache_key] = pool
+
+         logger.debug(
+             f"Created sync Redis blocking pool (max={max_connections}, timeout={timeout}s, "
+             f"health_check={health_check_interval}s, max_idle={max_idle_time}s): "
+             f"{redis_url}, decode={decode_responses}"
+         )
+
+     return pool_cache[cache_key]
+
+
+ def get_async_redis_pool(
+     redis_url: str,
+     decode_responses: bool = True,
+     max_connections: int = 200,
+     socket_connect_timeout: int = 30,
+     socket_timeout: int = 60,
+     socket_keepalive: bool = True,
+     health_check_interval: int = 30,
+     timeout: int = 60,
+     max_idle_time: int = 120,
+     idle_check_interval: int = 10,
+     **pool_kwargs
+ ) -> AsyncIdleTrackingBlockingConnectionPool:
+     """
+     Get or create an async Redis connection pool (global singleton, backed by the
+     custom AsyncIdleTrackingBlockingConnectionPool).
+
+     Pool tuning strategy:
+     1. BlockingConnectionPool: blocks when the pool is exhausted instead of leaking connections
+     2. health_check_interval: redis-py's built-in health check weeds out dead connections
+     3. TCP keepalive: OS-level probes detect broken connections
+     4. Infinite retry: reconnects automatically through network blips
+     5. Idle reclamation: tracks real last-use times and closes long-idle connections
+
+     Args:
+         redis_url: Redis connection URL
+         decode_responses: whether to decode responses to str
+         max_connections: maximum pool size (default 200)
+         socket_connect_timeout: socket connect timeout in seconds (default 30)
+         socket_timeout: socket read/write timeout in seconds (default 60); 0 means wait forever
+         socket_keepalive: whether to enable socket keepalive
+         health_check_interval: health check interval in seconds (default 30, recommended 30-60)
+         timeout: how long to wait for a free connection, in seconds (default 60)
+         max_idle_time: maximum idle time in seconds; idle connections are closed after this (default 120)
+             - 0 disables idle reclamation
+         idle_check_interval: idle check interval in seconds (default 10)
+         **pool_kwargs: extra pool parameters
+
+     Returns:
+         AsyncIdleTrackingBlockingConnectionPool: async blocking pool with idle reclamation
+     """
+     # Pick the pool cache for this decode mode
+     pool_cache = _async_redis_pools if decode_responses else _async_binary_redis_pools
+
+     # Cache key
+     cache_key = redis_url
+
+     if cache_key not in pool_cache:
+         socket_keepalive_options = _get_socket_keepalive_options()
+
+         # Infinite retry instance
+         infinite_retry = InfiniteRetry()
+
+         # Build the pool parameters
+         pool_params = {
+             'decode_responses': decode_responses,
+             'max_connections': max_connections,
+             'timeout': timeout,  # BlockingConnectionPool-specific: wait time for a free connection
+             'retry_on_timeout': True,
+             'retry_on_error': [ConnectionError, TimeoutError, OSError, BrokenPipeError],
+             'retry': infinite_retry,  # never give up
+             'socket_keepalive': socket_keepalive,
+             'health_check_interval': health_check_interval,
+             'socket_connect_timeout': socket_connect_timeout,
+             'max_idle_time': max_idle_time,  # idle reclamation settings
+             'idle_check_interval': idle_check_interval,
+         }
+
+         # Add socket_keepalive_options (if keepalive is enabled)
+         if socket_keepalive and socket_keepalive_options:
+             pool_params['socket_keepalive_options'] = socket_keepalive_options
+
+         # Add socket_timeout (0 means wait forever)
+         if socket_timeout > 0:
+             pool_params['socket_timeout'] = socket_timeout
+
+         # Merge extra parameters
+         pool_params.update(pool_kwargs)
+
+         # Build the pool via AsyncIdleTrackingBlockingConnectionPool.from_url
+         pool = AsyncIdleTrackingBlockingConnectionPool.from_url(
+             redis_url,
+             **pool_params
+         )
+         pool_cache[cache_key] = pool
+
+         logger.debug(
+             f"Created async Redis blocking pool (max={max_connections}, timeout={timeout}s, "
+             f"health_check={health_check_interval}s, max_idle={max_idle_time}s): "
+             f"{redis_url}, decode={decode_responses}"
+         )
+
+     return pool_cache[cache_key]
+
+
+ def get_dual_mode_async_redis_client(
+     redis_url: str,
+     max_connections: int = 200,
+     **pool_kwargs
+ ) -> tuple[redis.Redis, redis.Redis]:
+     """
+     Get a dual-mode pair of async Redis clients (backed by two independent pools).
+
+     Core mechanism:
+     - Two pools are created:
+         * text_pool: decode_responses=True
+         * binary_pool: decode_responses=False
+     - Both pools share the same max_connections setting, so the total never exceeds max_connections*2
+     - In practice most code uses only one of the two modes, so little is wasted
+
+     Benefits:
+     - Text and binary data are both handled correctly
+     - The two pools are managed independently and do not interfere
+     - Cleanly covers binary operations such as Streams
+
+     Args:
+         redis_url: Redis connection URL
+         max_connections: maximum size of each pool
+         **pool_kwargs: extra pool parameters
+
+     Returns:
+         tuple: (text_client, binary_client)
+             - text_client: decode_responses=True, returns str
+             - binary_client: decode_responses=False, returns bytes
+
+     Example:
+         >>> text_redis, binary_redis = get_dual_mode_async_redis_client("redis://localhost:6379/0")
+         >>> await text_redis.set("key", "value")
+         >>> result = await text_redis.get("key")  # str: "value"
+         >>> messages = await binary_redis.xreadgroup(...)  # returns bytes
+     """
+     # Text-mode pool (decode=True)
+     text_pool = get_async_redis_pool(
+         redis_url=redis_url,
+         decode_responses=True,
+         max_connections=max_connections,
+         **pool_kwargs
+     )
+
+     # Binary-mode pool (decode=False)
+     binary_pool = get_async_redis_pool(
+         redis_url=redis_url,
+         decode_responses=False,
+         max_connections=max_connections,
+         **pool_kwargs
+     )
+
+     # One client per pool
+     text_client = redis.Redis(connection_pool=text_pool)
+     binary_client = redis.Redis(connection_pool=binary_pool)
+
+     return text_client, binary_client
+
+
+ def get_async_redis_pool_for_pubsub(
+     redis_url: str,
+     decode_responses: bool = True,
+     max_connections: int = 10,
+     health_check_interval: int = 60,
+     **pool_kwargs
+ ) -> redis.ConnectionPool:
+     """
+     Get or create an async Redis connection pool dedicated to Pub/Sub.
+
+     Pub/Sub connections are long-lived and may sit without messages for a long
+     time, so they need special settings:
+     - socket_timeout=0 (wait forever; a quiet channel must not time out)
+     - max_connections=10 (Pub/Sub needs only a few connections)
+     - health_check_interval=60 (actively check connection health every 60s)
+
+     Args:
+         redis_url: Redis connection URL
+         decode_responses: whether to decode responses to str
+         max_connections: maximum pool size (default 10; Pub/Sub does not need many)
+         health_check_interval: health check interval in seconds (default 60)
+         **pool_kwargs: extra pool parameters
+
+     Returns:
+         redis.ConnectionPool: a pool configured for Pub/Sub
+     """
+     return get_async_redis_pool(
+         redis_url=redis_url,
+         decode_responses=decode_responses,
+         max_connections=max_connections,
+         socket_connect_timeout=30,
+         socket_timeout=0,  # wait forever -- a silent channel must not time out
+         socket_keepalive=True,
+         health_check_interval=health_check_interval,
+         **pool_kwargs
+     )
+
+
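Aside: a quick usage sketch for the Pub/Sub pool, using standard redis-py asyncio calls (URL and channel name are placeholders; illustration only):

    import redis.asyncio as redis

    async def listen():
        pool = get_async_redis_pool_for_pubsub("redis://localhost:6379/0")
        client = redis.Redis(connection_pool=pool)
        pubsub = client.pubsub()
        await pubsub.subscribe("jettask:events")  # hypothetical channel name
        while True:
            # socket_timeout=0 on the pool means a quiet channel never errors out
            msg = await pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
            if msg:
                print(msg["channel"], msg["data"])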
+ def get_pg_engine_and_factory(
+     dsn: str,
+     pool_size: int = 5,
+     max_overflow: int = 10,
+     pool_recycle: int = 3600,
+     echo: bool = False,
+     **engine_kwargs
+ ) -> tuple:
+     """
+     Get or create a PostgreSQL engine and session factory (global singleton).
+
+     Args:
+         dsn: PostgreSQL connection DSN
+         pool_size: connection pool size
+         max_overflow: pool overflow size
+         pool_recycle: connection recycle time in seconds
+         echo: whether to log SQL statements
+         **engine_kwargs: extra engine parameters
+
+     Returns:
+         tuple: (engine, session_factory)
+     """
+     if dsn not in _pg_engines:
+         # Create the async engine
+         engine = create_async_engine(
+             dsn,
+             pool_size=pool_size,
+             max_overflow=max_overflow,
+             pool_recycle=pool_recycle,
+             echo=echo,
+             **engine_kwargs
+         )
+
+         # Create the session factory
+         session_factory = async_sessionmaker(
+             bind=engine,
+             class_=AsyncSession,
+             expire_on_commit=False
+         )
+
+         _pg_engines[dsn] = engine
+         _pg_session_factories[dsn] = session_factory
+
+         logger.debug(f"Created PostgreSQL engine: {dsn}")
+
+     return _pg_engines[dsn], _pg_session_factories[dsn]
+
+
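Aside: a short usage sketch for the engine/factory pair (the DSN and credentials are placeholders; illustration only):

    from sqlalchemy import text

    async def ping_db():
        engine, factory = get_pg_engine_and_factory(
            "postgresql+asyncpg://postgres:secret@localhost:5432/jettask"  # hypothetical DSN
        )
        async with factory() as session:
            result = await session.execute(text("SELECT 1"))
            print(result.scalar())  # -> 1

Because the engine is cached per DSN, calling this from many places reuses one underlying connection pool.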
+ # ============================================================
+ # Section 6: Configuration and connector classes
+ # ============================================================
+
+ class DBConfig:
+     """Database configuration helpers."""
+
+     @staticmethod
+     def parse_redis_config(config: Union[str, Dict[str, Any]]) -> str:
+         """
+         Parse a Redis configuration into a connection URL.
+
+         Args:
+             config: either
+                 - a string: "redis://host:port/db"
+                 - a dict: {"url": "redis://..."} or {"host": ..., "port": ..., "db": ...}
+
+         Returns:
+             str: Redis connection URL
+
+         Examples:
+             >>> DBConfig.parse_redis_config("redis://localhost:6379/0")
+             'redis://localhost:6379/0'
+
+             >>> DBConfig.parse_redis_config({"host": "localhost", "port": 6379, "db": 0})
+             'redis://localhost:6379/0'
+
+             >>> DBConfig.parse_redis_config({"url": "redis://10.0.0.1:6379/5"})
+             'redis://10.0.0.1:6379/5'
+         """
+         if isinstance(config, str):
+             return config
+
+         if isinstance(config, dict):
+             # Prefer an explicit url field
+             if 'url' in config:
+                 return config['url']
+
+             # Otherwise build the URL from the individual fields
+             host = config.get('host', 'localhost')
+             port = config.get('port', 6379)
+             db = config.get('db', 0)
+             password = config.get('password')
+
+             if password:
+                 return f"redis://:{password}@{host}:{port}/{db}"
+             else:
+                 return f"redis://{host}:{port}/{db}"
+
+         raise ValueError(f"Unsupported Redis configuration format: {type(config)}")
+
+     @staticmethod
+     def parse_pg_config(config: Union[str, Dict[str, Any]]) -> str:
+         """
+         Parse a PostgreSQL configuration into a DSN.
+
+         Args:
+             config: either
+                 - a string: "postgresql://user:pass@host:port/db"
+                 - a dict: {"url": "postgresql://..."} or {"host": ..., "user": ..., ...}
+
+         Returns:
+             str: PostgreSQL DSN (asyncpg flavor)
+
+         Examples:
+             >>> DBConfig.parse_pg_config("postgresql://user:pass@localhost/mydb")
+             'postgresql+asyncpg://user:pass@localhost/mydb'
+
+             >>> DBConfig.parse_pg_config({
+             ...     "host": "localhost",
+             ...     "user": "admin",
+             ...     "password": "secret",
+             ...     "database": "mydb"
+             ... })
+             'postgresql+asyncpg://admin:secret@localhost:5432/mydb'
+         """
+         if isinstance(config, str):
+             # Make sure the asyncpg driver is used
+             if config.startswith('postgresql://'):
+                 return config.replace('postgresql://', 'postgresql+asyncpg://', 1)
+             elif config.startswith('postgresql+asyncpg://'):
+                 return config
+             else:
+                 raise ValueError(f"Unsupported PostgreSQL URL format: {config}")
+
+         if isinstance(config, dict):
+             # Prefer an explicit url field
+             if 'url' in config:
+                 url = config['url']
+                 if url.startswith('postgresql://'):
+                     return url.replace('postgresql://', 'postgresql+asyncpg://', 1)
+                 return url
+
+             # Otherwise build the DSN from the individual fields
+             user = config.get('user', 'postgres')
+             password = config.get('password', '')
+             host = config.get('host', 'localhost')
+             port = config.get('port', 5432)
+             database = config.get('database', 'postgres')
+
+             return f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{database}"
+
+         raise ValueError(f"Unsupported PostgreSQL configuration format: {type(config)}")
+
+
+ class SyncRedisConnector:
+     """
+     Sync Redis connection manager (uses the global singleton pool).
+
+     Usage:
+         # Option 1: direct use
+         connector = SyncRedisConnector("redis://localhost:6379/0")
+         client = connector.get_client()
+         client.set("key", "value")
+
+         # Option 2: context manager
+         with SyncRedisConnector("redis://localhost:6379/0") as client:
+             client.set("key", "value")
+     """
+
+     def __init__(
+         self,
+         config: Union[str, Dict[str, Any]],
+         decode_responses: bool = False,
+         max_connections: int = 200,
+         **pool_kwargs
+     ):
+         """
+         Initialize the sync Redis connector.
+
+         Args:
+             config: Redis configuration (URL or dict)
+             decode_responses: whether to decode responses to str
+             max_connections: maximum pool size
+             **pool_kwargs: extra pool parameters
+         """
+         self.redis_url = DBConfig.parse_redis_config(config)
+         self.decode_responses = decode_responses
+         self.max_connections = max_connections
+         self.pool_kwargs = pool_kwargs
+
+         # Use the global singleton pool
+         self._pool: sync_redis.ConnectionPool = get_sync_redis_pool(
+             self.redis_url,
+             decode_responses=self.decode_responses,
+             max_connections=self.max_connections,
+             **self.pool_kwargs
+         )
+         self._client: Optional[sync_redis.Redis] = None
+         logger.debug(f"Sync Redis connector initialized: {self.redis_url}")
+
+     def get_client(self) -> sync_redis.Redis:
+         """
+         Get a sync Redis client.
+
+         Returns:
+             sync_redis.Redis: a sync Redis client instance
+         """
+         try:
+             return sync_redis.Redis(connection_pool=self._pool)
+         except Exception as e:
+             logger.error(f"Failed to get sync Redis client: {e}")
+             traceback.print_exc()
+             raise
+
+     def close(self):
+         """Close the client (the pool is managed globally and stays open)."""
+         # Note: the pool is a global singleton and must not be closed here;
+         # only client connections are closed
+         pass
+
+     def __enter__(self) -> sync_redis.Redis:
+         """Context manager entry."""
+         self._client = self.get_client()
+         return self._client
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Context manager exit."""
+         if self._client:
+             self._client.close()
+             self._client = None
+
+
+ class RedisConnector:
+     """
+     Async Redis connection manager (dual-mode, uses the global singleton pools).
+
+     Dual mode:
+     - Text mode (decode_responses=True): returns str, for ordinary operations
+     - Binary mode (decode_responses=False): returns bytes, for Streams and other raw-data operations
+
+     Note: the two modes use separate pools, but both benefit from the global singleton mechanism.
+
+     Usage:
+         # Option 1: text mode (default)
+         connector = RedisConnector("redis://localhost:6379/0")
+         client = await connector.get_client()
+         await client.set("key", "value")
+
+         # Option 2: binary mode
+         binary_client = await connector.get_client(binary_mode=True)
+         messages = await binary_client.xreadgroup(...)
+
+         # Option 3: convenience method
+         messages = await connector.xreadgroup_binary(...)
+     """
+
+     def __init__(
+         self,
+         config: Union[str, Dict[str, Any]],
+         decode_responses: bool = True,
+         max_connections: int = 200,
+         **pool_kwargs
+     ):
+         """
+         Initialize the async Redis connector.
+
+         Args:
+             config: Redis configuration (URL or dict)
+             decode_responses: whether to decode responses to str (default True)
+             max_connections: maximum pool size
+             **pool_kwargs: extra pool parameters
+         """
+         self.redis_url = DBConfig.parse_redis_config(config)
+         self.decode_responses = decode_responses
+         self.max_connections = max_connections
+         self.pool_kwargs = pool_kwargs
+
+         # Dual-mode clients, created lazily
+         self._text_client: Optional[redis.Redis] = None
+         self._binary_client: Optional[redis.Redis] = None
+         self._client: Optional[redis.Redis] = None  # used by the context manager protocol
+         logger.debug(f"Async Redis connector initialized: {self.redis_url}")
+
+     async def initialize(self):
+         """
+         Initialize the pools (backward compatibility).
+
+         Note: pools are created lazily; this method is kept for backward compatibility.
+         """
+         pass  # pools are created lazily
+
+     async def get_client(self, binary_mode: bool = False) -> redis.Redis:
+         """
+         Get a Redis client (dual-mode).
+
+         Args:
+             binary_mode: whether to use binary mode (no automatic decoding)
+                 - False (default): text client (decode_responses=True)
+                 - True: binary client (decode_responses=False)
+
+         Returns:
+             redis.Redis: a Redis client instance
+
+         Example:
+             # Text mode
+             client = await connector.get_client()
+             value = await client.get("key")  # returns str
+
+             # Binary mode
+             binary_client = await connector.get_client(binary_mode=True)
+             messages = await binary_client.xreadgroup(...)  # returns bytes
+         """
+         try:
+             # Create the dual-mode clients lazily
+             if self._text_client is None:
+                 self._text_client, self._binary_client = get_dual_mode_async_redis_client(
+                     redis_url=self.redis_url,
+                     max_connections=self.max_connections,
+                     **self.pool_kwargs
+                 )
+
+             return self._binary_client if binary_mode else self._text_client
+         except Exception as e:
+             logger.error(f"Failed to get Redis client: {e}")
+             traceback.print_exc()
+             raise
+
+     async def xreadgroup_binary(self, *args, **kwargs):
+         """
+         Convenience method: read a Stream in binary mode.
+
+         Wraps binary_client.xreadgroup() so callers do not have to pass
+         binary_mode=True every time.
+
+         Args:
+             *args, **kwargs: xreadgroup arguments
+
+         Returns:
+             Stream messages (raw bytes)
+
+         Example:
+             messages = await connector.xreadgroup_binary(
+                 groupname="mygroup",
+                 consumername="consumer1",
+                 streams={"mystream": ">"},
+                 count=10,
+                 block=1000
+             )
+         """
+         binary_client = await self.get_client(binary_mode=True)
+         return await binary_client.xreadgroup(*args, **kwargs)
+
+     async def xread_binary(self, *args, **kwargs):
+         """
+         Convenience method: read a Stream in binary mode.
+
+         Args:
+             *args, **kwargs: xread arguments
+
+         Returns:
+             Stream messages (raw bytes)
+         """
+         binary_client = await self.get_client(binary_mode=True)
+         return await binary_client.xread(*args, **kwargs)
+
+     async def close(self):
+         """Close the client (the pool is managed globally and stays open)."""
+         # Note: the pool is a global singleton and must not be closed here;
+         # only client connections are closed
+         pass
+
+     async def __aenter__(self) -> redis.Redis:
+         """Async context manager entry."""
+         await self.initialize()
+         self._client = await self.get_client()
+         return self._client
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Async context manager exit."""
+         if self._client:
+             await self._client.close()
+             self._client = None
+
+
+ class PostgreSQLConnector:
+     """
+     PostgreSQL connection manager (uses the global singleton engine).
+
+     Usage:
+         # Option 1: direct use
+         connector = PostgreSQLConnector("postgresql://user:pass@localhost/db")
+         session = await connector.get_session()
+
+         # Option 2: context manager
+         async with PostgreSQLConnector(config) as session:
+             result = await session.execute(select(User))
+     """
+
+     def __init__(
+         self,
+         config: Union[str, Dict[str, Any]],
+         pool_size: int = 5,
+         max_overflow: int = 10,
+         pool_recycle: int = 3600,
+         echo: bool = False,
+         **engine_kwargs
+     ):
+         """
+         Initialize the PostgreSQL connector.
+
+         Args:
+             config: PostgreSQL configuration (DSN or dict)
+             pool_size: connection pool size
+             max_overflow: pool overflow size
+             pool_recycle: connection recycle time in seconds
+             echo: whether to log SQL statements
+             **engine_kwargs: extra engine parameters
+         """
+         self.dsn = DBConfig.parse_pg_config(config)
+         self.pool_size = pool_size
+         self.max_overflow = max_overflow
+         self.pool_recycle = pool_recycle
+         self.echo = echo
+         self.engine_kwargs = engine_kwargs
+
+         # Use the global singleton engine and session factory
+         self._engine, self._session_factory = get_pg_engine_and_factory(
+             self.dsn,
+             pool_size=self.pool_size,
+             max_overflow=self.max_overflow,
+             pool_recycle=self.pool_recycle,
+             echo=self.echo,
+             **self.engine_kwargs
+         )
+         logger.debug(f"PostgreSQL connector initialized: {self.dsn}")
+
+     async def initialize(self):
+         """Initialize the engine and session factory (backward compatibility)."""
+         # The engine is already obtained in __init__ via the global singleton
+         pass
+
+     async def get_session(self) -> AsyncSession:
+         """
+         Get a database session.
+
+         Returns:
+             AsyncSession: a SQLAlchemy async session
+
+         Raises:
+             RuntimeError: if the engine is not initialized
+         """
+         try:
+             return self._session_factory()
+         except Exception as e:
+             logger.error(f"Failed to get PostgreSQL session: {e}")
+             traceback.print_exc()
+             raise
+
+     @asynccontextmanager
+     async def session_scope(self):
+         """
+         Session context manager (commit/rollback handled automatically).
+
+         Usage:
+             async with connector.session_scope() as session:
+                 user = User(name="Alice")
+                 session.add(user)
+                 # committed automatically
+         """
+         session = await self.get_session()
+         try:
+             yield session
+             await session.commit()
+         except Exception:
+             await session.rollback()
+             raise
+         finally:
+             await session.close()
+
+     async def close(self):
+         """Close the connector (the engine is managed globally and stays open)."""
+         # Note: the engine is a global singleton and must not be closed here
+         pass
+
+     async def __aenter__(self) -> AsyncSession:
+         """Async context manager entry."""
+         await self.initialize()
+         return await self.get_session()
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Async context manager exit."""
+         # The engine is a global singleton and is not closed here
+         pass
+
+
+ class ConnectionManager:
+     """
+     Connection manager -- manages Redis and PostgreSQL connections together.
+
+     Usage:
+         manager = ConnectionManager(
+             redis_config="redis://localhost:6379/0",
+             pg_config={"host": "localhost", "user": "admin", "password": "secret", "database": "mydb"}
+         )
+
+         # Get a Redis client
+         redis_client = await manager.get_redis_client()
+
+         # Get a PostgreSQL session
+         pg_session = await manager.get_pg_session()
+
+         # Close everything
+         await manager.close_all()
+     """
+
+     def __init__(
+         self,
+         redis_config: Optional[Union[str, Dict[str, Any]]] = None,
+         pg_config: Optional[Union[str, Dict[str, Any]]] = None,
+         redis_decode: bool = True,
+         redis_max_connections: int = 50,
+         pg_pool_size: int = 5,
+         pg_max_overflow: int = 10,
+     ):
+         """
+         Initialize the connection manager.
+
+         Args:
+             redis_config: Redis configuration
+             pg_config: PostgreSQL configuration
+             redis_decode: whether Redis responses are decoded
+             redis_max_connections: maximum Redis connections
+             pg_pool_size: PostgreSQL pool size
+             pg_max_overflow: PostgreSQL maximum overflow connections
+         """
+         self._redis_connector: Optional[RedisConnector] = None
+         self._pg_connector: Optional[PostgreSQLConnector] = None
+
+         if redis_config:
+             self._redis_connector = RedisConnector(
+                 redis_config,
+                 decode_responses=redis_decode,
+                 max_connections=redis_max_connections
+             )
+
+         if pg_config:
+             self._pg_connector = PostgreSQLConnector(
+                 pg_config,
+                 pool_size=pg_pool_size,
+                 max_overflow=pg_max_overflow
+             )
+
+     async def get_redis_client(self, decode: bool = True) -> redis.Redis:
+         """Get a Redis client."""
+         if not self._redis_connector:
+             raise ValueError("No Redis connection configured")
+
+         # If a different decode setting is requested, create a new connector
+         if decode != self._redis_connector.decode_responses:
+             temp_connector = RedisConnector(
+                 self._redis_connector.redis_url,
+                 decode_responses=decode
+             )
+             return await temp_connector.get_client()
+
+         return await self._redis_connector.get_client()
+
+     async def get_pg_session(self) -> AsyncSession:
+         """Get a PostgreSQL session."""
+         if not self._pg_connector:
+             raise ValueError("No PostgreSQL connection configured")
+
+         return await self._pg_connector.get_session()
+
+     @asynccontextmanager
+     async def pg_session_scope(self):
+         """PostgreSQL session context (commit/rollback handled automatically)."""
+         if not self._pg_connector:
+             raise ValueError("No PostgreSQL connection configured")
+
+         async with self._pg_connector.session_scope() as session:
+             yield session
+
+     async def close_all(self):
+         """Close all connections."""
+         if self._redis_connector:
+             await self._redis_connector.close()
+         if self._pg_connector:
+             await self._pg_connector.close()
+         # logger.info("All database connections closed")
+
+
+ # Convenience functions
+
+ async def create_redis_client(
+     config: Union[str, Dict[str, Any]],
+     decode_responses: bool = True
+ ) -> redis.Redis:
+     """
+     Shortcut for creating a Redis client.
+
+     Args:
+         config: Redis configuration
+         decode_responses: whether to decode responses
+
+     Returns:
+         redis.Redis: a Redis client
+     """
+     connector = RedisConnector(config, decode_responses=decode_responses)
+     return await connector.get_client()
+
+
+ async def create_pg_session(
+     config: Union[str, Dict[str, Any]]
+ ) -> AsyncSession:
+     """
+     Shortcut for creating a PostgreSQL session.
+
+     Args:
+         config: PostgreSQL configuration
+
+     Returns:
+         AsyncSession: a SQLAlchemy async session
+     """
+     connector = PostgreSQLConnector(config)
+     return await connector.get_session()
+
+
+ # ============================================================
+ # Section 7: Global client instance management
+ # ============================================================
+
+ # Backward compatibility: keep the old global names as references into _PoolRegistry
+ _sync_redis_clients = _PoolRegistry.sync_redis_clients
+ _sync_binary_redis_clients = _PoolRegistry.sync_binary_redis_clients
+ _async_redis_clients = _PoolRegistry.async_redis_clients
+ _async_binary_redis_clients = _PoolRegistry.async_binary_redis_clients
+
+ def get_sync_redis_client(
+     redis_url: str,
+     decode_responses: bool = True,
+     max_connections: int = 1000,
+     **pool_kwargs
+ ) -> sync_redis.StrictRedis:
+     """
+     Get a sync Redis client instance (global singleton).
+
+     Difference from get_sync_redis_pool:
+     - get_sync_redis_pool: returns a pool; you build the client yourself
+     - get_sync_redis_client: returns a ready-to-use client instance (recommended)
+
+     Args:
+         redis_url: Redis connection URL
+         decode_responses: whether to decode responses to str
+         max_connections: maximum pool size
+         **pool_kwargs: extra pool parameters
+
+     Returns:
+         sync_redis.StrictRedis: a sync Redis client instance (global singleton)
+     """
+     # Filter out parameters the redis pool does not accept;
+     # 'name' is not supported by redis.Connection and would raise TypeError
+     pool_kwargs.pop('name', None)
+
+     # Pick the client cache for this decode mode
+     client_cache = _sync_redis_clients if decode_responses else _sync_binary_redis_clients
+
+     # Cache key
+     cache_key = redis_url
+
+     if cache_key not in client_cache:
+         # Get the pool (already a singleton)
+         pool = get_sync_redis_pool(
+             redis_url=redis_url,
+             decode_responses=decode_responses,
+             max_connections=max_connections,
+             **pool_kwargs
+         )
+
+         # Create and cache the client instance
+         client_cache[cache_key] = sync_redis.StrictRedis(connection_pool=pool)
+         logger.debug(f"Created sync Redis client instance: {redis_url}, decode={decode_responses}, PID={os.getpid()}")
+
+     return client_cache[cache_key]
+
+
+ def get_async_redis_client(
+     redis_url: str,
+     decode_responses: bool = True,
+     max_connections: int = 1000,
+     socket_timeout: int = 60,
+     **pool_kwargs
+ ) -> redis.StrictRedis:
+     """
+     Get an async Redis client instance (global singleton).
+
+     Args:
+         redis_url: Redis connection URL
+         decode_responses: whether to decode responses to str
+         max_connections: maximum pool size
+         socket_timeout: socket read/write timeout in seconds
+         **pool_kwargs: extra pool parameters
+
+     Returns:
+         redis.StrictRedis: an async Redis client instance (global singleton)
+     """
+     # Filter out parameters the redis pool does not accept;
+     # 'name' is not supported by redis.asyncio.Connection and would raise TypeError
+     pool_kwargs.pop('name', None)
+
+     # Pick the client cache for this decode mode
+     client_cache = _async_redis_clients if decode_responses else _async_binary_redis_clients
+
+     # Cache key
+     cache_key = redis_url
+
+     if cache_key not in client_cache:
+         # Get the pool (already a singleton)
+         pool = get_async_redis_pool(
+             redis_url=redis_url,
+             decode_responses=decode_responses,
+             max_connections=max_connections,
+             socket_timeout=socket_timeout,
+             **pool_kwargs
+         )
+
+         # Create and cache the client instance
+         client_cache[cache_key] = redis.StrictRedis(connection_pool=pool)
+         logger.debug(f"Created async Redis client instance: {redis_url}, decode={decode_responses}, PID={os.getpid()}")
+
+     return client_cache[cache_key]
+
+
+ def clear_all_cache():
+     """
+     Clear all caches (pools and client instances).
+
+     Used to fully reset connections in a child process after fork.
+
+     Note: this may run before logging is configured (e.g. right after a fork),
+     which is why the diagnostic below uses print rather than logger.
+     """
+     # Clear everything through _PoolRegistry
+     _PoolRegistry.clear_all()
+
+     # print rather than logger: logging may not be configured yet after a fork
+     # print(f"[PID {os.getpid()}] Cleared all Redis pool and client caches", flush=True)
+
+
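Aside: since fork-safety is the stated reason for clear_all_cache, one plausible wiring (a sketch; the package may hook this up elsewhere) is to register it as an after-fork hook so children never reuse sockets that belong to the parent process:

    import os

    # Run clear_all_cache in every forked child so it does not inherit
    # pool objects whose connections were opened by the parent.
    os.register_at_fork(after_in_child=clear_all_cache)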
+ __all__ = [
+     # Global pool functions (kept for backward compatibility)
+     'get_sync_redis_pool',
+     'get_async_redis_pool',
+     'get_async_redis_pool_for_pubsub',  # Pub/Sub-dedicated pool
+     'get_pg_engine_and_factory',
+
+     # Client instance functions (recommended)
+     'get_sync_redis_client',
+     'get_async_redis_client',
+
+     # Cache cleanup
+     'clear_all_cache',
+
+     # Configuration parsing
+     'DBConfig',
+
+     # Connector classes (wrap the global pools)
+     'SyncRedisConnector',
+     'RedisConnector',
+     'PostgreSQLConnector',
+     'ConnectionManager',
+
+     # Convenience functions
+     'create_redis_client',
+     'create_pg_session',
+ ]