jettask 0.2.20__py3-none-any.whl → 0.2.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. jettask/__init__.py +4 -0
  2. jettask/cli.py +12 -8
  3. jettask/config/lua_scripts.py +37 -0
  4. jettask/config/nacos_config.py +1 -1
  5. jettask/core/app.py +313 -340
  6. jettask/core/container.py +4 -4
  7. jettask/{persistence → core}/namespace.py +93 -27
  8. jettask/core/task.py +16 -9
  9. jettask/core/unified_manager_base.py +136 -26
  10. jettask/db/__init__.py +67 -0
  11. jettask/db/base.py +137 -0
  12. jettask/{utils/db_connector.py → db/connector.py} +130 -26
  13. jettask/db/models/__init__.py +16 -0
  14. jettask/db/models/scheduled_task.py +196 -0
  15. jettask/db/models/task.py +77 -0
  16. jettask/db/models/task_run.py +85 -0
  17. jettask/executor/__init__.py +0 -15
  18. jettask/executor/core.py +76 -31
  19. jettask/executor/process_entry.py +29 -114
  20. jettask/executor/task_executor.py +4 -0
  21. jettask/messaging/event_pool.py +928 -685
  22. jettask/messaging/scanner.py +30 -0
  23. jettask/persistence/__init__.py +28 -103
  24. jettask/persistence/buffer.py +170 -0
  25. jettask/persistence/consumer.py +330 -249
  26. jettask/persistence/manager.py +304 -0
  27. jettask/persistence/persistence.py +391 -0
  28. jettask/scheduler/__init__.py +15 -3
  29. jettask/scheduler/{task_crud.py → database.py} +61 -57
  30. jettask/scheduler/loader.py +2 -2
  31. jettask/scheduler/{scheduler_coordinator.py → manager.py} +23 -6
  32. jettask/scheduler/models.py +14 -10
  33. jettask/scheduler/schedule.py +166 -0
  34. jettask/scheduler/scheduler.py +12 -11
  35. jettask/schemas/__init__.py +50 -1
  36. jettask/schemas/backlog.py +43 -6
  37. jettask/schemas/namespace.py +70 -19
  38. jettask/schemas/queue.py +19 -3
  39. jettask/schemas/responses.py +493 -0
  40. jettask/task/__init__.py +0 -2
  41. jettask/task/router.py +3 -0
  42. jettask/test_connection_monitor.py +1 -1
  43. jettask/utils/__init__.py +7 -5
  44. jettask/utils/db_init.py +8 -4
  45. jettask/utils/namespace_dep.py +167 -0
  46. jettask/utils/queue_matcher.py +186 -0
  47. jettask/utils/rate_limit/concurrency_limiter.py +7 -1
  48. jettask/utils/stream_backlog.py +1 -1
  49. jettask/webui/__init__.py +0 -1
  50. jettask/webui/api/__init__.py +4 -4
  51. jettask/webui/api/alerts.py +806 -71
  52. jettask/webui/api/example_refactored.py +400 -0
  53. jettask/webui/api/namespaces.py +390 -45
  54. jettask/webui/api/overview.py +300 -54
  55. jettask/webui/api/queues.py +971 -267
  56. jettask/webui/api/scheduled.py +1249 -56
  57. jettask/webui/api/settings.py +129 -7
  58. jettask/webui/api/workers.py +442 -0
  59. jettask/webui/app.py +46 -2329
  60. jettask/webui/middleware/__init__.py +6 -0
  61. jettask/webui/middleware/namespace_middleware.py +135 -0
  62. jettask/webui/services/__init__.py +146 -0
  63. jettask/webui/services/heartbeat_service.py +251 -0
  64. jettask/webui/services/overview_service.py +60 -51
  65. jettask/webui/services/queue_monitor_service.py +426 -0
  66. jettask/webui/services/redis_monitor_service.py +87 -0
  67. jettask/webui/services/settings_service.py +174 -111
  68. jettask/webui/services/task_monitor_service.py +222 -0
  69. jettask/webui/services/timeline_pg_service.py +452 -0
  70. jettask/webui/services/timeline_service.py +189 -0
  71. jettask/webui/services/worker_monitor_service.py +467 -0
  72. jettask/webui/utils/__init__.py +11 -0
  73. jettask/webui/utils/time_utils.py +122 -0
  74. jettask/worker/lifecycle.py +8 -2
  75. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/METADATA +1 -1
  76. jettask-0.2.24.dist-info/RECORD +142 -0
  77. jettask/executor/executor.py +0 -338
  78. jettask/persistence/backlog_monitor.py +0 -567
  79. jettask/persistence/base.py +0 -2334
  80. jettask/persistence/db_manager.py +0 -516
  81. jettask/persistence/maintenance.py +0 -81
  82. jettask/persistence/message_consumer.py +0 -259
  83. jettask/persistence/models.py +0 -49
  84. jettask/persistence/offline_recovery.py +0 -196
  85. jettask/persistence/queue_discovery.py +0 -215
  86. jettask/persistence/task_persistence.py +0 -218
  87. jettask/persistence/task_updater.py +0 -583
  88. jettask/scheduler/add_execution_count.sql +0 -11
  89. jettask/scheduler/add_priority_field.sql +0 -26
  90. jettask/scheduler/add_scheduler_id.sql +0 -25
  91. jettask/scheduler/add_scheduler_id_index.sql +0 -10
  92. jettask/scheduler/make_scheduler_id_required.sql +0 -28
  93. jettask/scheduler/migrate_interval_seconds.sql +0 -9
  94. jettask/scheduler/performance_optimization.sql +0 -45
  95. jettask/scheduler/run_scheduler.py +0 -186
  96. jettask/scheduler/schema.sql +0 -84
  97. jettask/task/task_executor.py +0 -318
  98. jettask/webui/api/analytics.py +0 -323
  99. jettask/webui/config.py +0 -90
  100. jettask/webui/models/__init__.py +0 -3
  101. jettask/webui/models/namespace.py +0 -63
  102. jettask/webui/namespace_manager/__init__.py +0 -10
  103. jettask/webui/namespace_manager/multi.py +0 -593
  104. jettask/webui/namespace_manager/unified.py +0 -193
  105. jettask/webui/run.py +0 -46
  106. jettask-0.2.20.dist-info/RECORD +0 -145
  107. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/WHEEL +0 -0
  108. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/entry_points.txt +0 -0
  109. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/licenses/LICENSE +0 -0
  110. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/top_level.txt +0 -0
jettask/db/base.py ADDED
@@ -0,0 +1,137 @@
+ """
+ Database base configuration
+ 
+ Uses the SQLAlchemy 2.0 async API
+ """
+ from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
+ from sqlalchemy.orm import declarative_base
+ from sqlalchemy.pool import NullPool
+ import logging
+ 
+ logger = logging.getLogger(__name__)
+ 
+ # Create the declarative base
+ Base = declarative_base()
+ 
+ # Global engine and session factory
+ _engine = None
+ _async_session_factory = None
+ 
+ 
+ def get_engine(database_url: str, **kwargs):
+     """
+     Get or create the database engine
+ 
+     Args:
+         database_url: database connection URL (e.g. postgresql+asyncpg://user:pass@host/db)
+         **kwargs: additional engine parameters
+ 
+     Returns:
+         AsyncEngine: the async database engine
+     """
+     global _engine
+ 
+     if _engine is None:
+         # Make sure the correct async driver is used
+         if database_url and 'postgresql://' in database_url:
+             database_url = database_url.replace('postgresql://', 'postgresql+asyncpg://')
+ 
+         # Default configuration
+         engine_kwargs = {
+             'echo': kwargs.pop('echo', False),
+             'pool_pre_ping': kwargs.pop('pool_pre_ping', True),
+             'poolclass': kwargs.pop('poolclass', NullPool),  # NullPool avoids connection-pool issues
+         }
+         engine_kwargs.update(kwargs)
+ 
+         _engine = create_async_engine(database_url, **engine_kwargs)
+         logger.info(f"Database engine created: {database_url.split('@')[-1]}")
+ 
+     return _engine
+ 
+ 
+ def get_session_factory(database_url: str = None, **kwargs):
+     """
+     Get or create the session factory
+ 
+     Args:
+         database_url: database connection URL
+         **kwargs: additional engine parameters
+ 
+     Returns:
+         async_sessionmaker: the async session factory
+     """
+     global _async_session_factory
+ 
+     if _async_session_factory is None:
+         if database_url is None:
+             raise ValueError("No database URL provided")
+ 
+         engine = get_engine(database_url, **kwargs)
+         _async_session_factory = async_sessionmaker(
+             engine,
+             class_=AsyncSession,
+             expire_on_commit=False,
+         )
+ 
+     return _async_session_factory
+ 
+ 
+ def get_session(database_url: str = None, **kwargs):
+     """
+     Get a database session (context manager)
+ 
+     Args:
+         database_url: database connection URL
+         **kwargs: additional engine parameters
+ 
+     Returns:
+         AsyncSession: an async database session usable as a context manager
+ 
+     Example:
+         async with get_session(db_url) as session:
+             result = await session.execute(select(Task))
+             tasks = result.scalars().all()
+     """
+     factory = get_session_factory(database_url, **kwargs)
+     return factory()
+ 
+ 
+ async def init_db(database_url: str, **kwargs):
+     """
+     Initialize the database (create all tables)
+ 
+     Args:
+         database_url: database connection URL
+         **kwargs: additional engine parameters
+ 
+     Example:
+         await init_db('postgresql+asyncpg://user:pass@localhost/jettask')
+     """
+     # Import all models so they register with Base.metadata
+     from .models import Task, ScheduledTask, TaskExecutionHistory  # noqa: F401
+ 
+     engine = get_engine(database_url, **kwargs)
+ 
+     async with engine.begin() as conn:
+         # Create all tables
+         await conn.run_sync(Base.metadata.create_all)
+         logger.info("Database tables created/updated")
+ 
+ 
+ async def drop_all(database_url: str, **kwargs):
+     """
+     Drop all tables (use with caution!)
+ 
+     Args:
+         database_url: database connection URL
+         **kwargs: additional engine parameters
+     """
+     # Import all models so they register with Base.metadata
+     from .models import Task, ScheduledTask, TaskExecutionHistory  # noqa: F401
+ 
+     engine = get_engine(database_url, **kwargs)
+ 
+     async with engine.begin() as conn:
+         await conn.run_sync(Base.metadata.drop_all)
+     logger.warning("All database tables dropped")
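The module keeps the engine and session factory as module-level singletons, so the first database_url passed in is the one the whole process uses. A minimal usage sketch, assuming a placeholder DSN and the Task model from jettask/db/models:

# Sketch: typical use of jettask/db/base.py (the DSN is a placeholder).
import asyncio
from sqlalchemy import select
from jettask.db.base import init_db, get_session
from jettask.db.models import Task

async def main():
    dsn = 'postgresql+asyncpg://user:pass@localhost/jettask'
    await init_db(dsn)  # imports the models and creates all tables

    # get_session() reuses the singleton engine created above.
    async with get_session(dsn) as session:
        result = await session.execute(select(Task))
        print(len(result.scalars().all()))

asyncio.run(main())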
jettask/{utils/db_connector.py → db/connector.py} RENAMED
@@ -60,6 +60,7 @@ from redis.asyncio import BlockingConnectionPool
  from redis.backoff import ExponentialBackoff
  from redis.retry import Retry
  from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
+ import asyncpg
 
  logger = logging.getLogger(__name__)
 
@@ -85,6 +86,9 @@ class _PoolRegistry:
      pg_engines: Dict[str, Any] = {}
      pg_session_factories: Dict[str, async_sessionmaker] = {}
 
+     # PostgreSQL asyncpg pool cache (native asyncpg pools)
+     asyncpg_pools: Dict[str, Any] = {}
+ 
      # Redis client instance cache
      sync_redis_clients: Dict[str, sync_redis.StrictRedis] = {}
      sync_binary_redis_clients: Dict[str, sync_redis.StrictRedis] = {}
@@ -100,6 +104,7 @@ class _PoolRegistry:
          cls.async_binary_redis_pools.clear()
          cls.pg_engines.clear()
          cls.pg_session_factories.clear()
+         cls.asyncpg_pools.clear()
          cls.sync_redis_clients.clear()
          cls.sync_binary_redis_clients.clear()
          cls.async_redis_clients.clear()
@@ -273,6 +278,13 @@ class IdleTrackingBlockingConnectionPool(sync_redis.BlockingConnectionPool):
                  connections_to_keep.append(conn)
                  continue
 
+             # Check whether the connection is marked as a PubSub connection
+             # Note: the caller must mark the connection via mark_as_pubsub() when creating a PubSub
+             if hasattr(conn, '_is_pubsub_connection') and conn._is_pubsub_connection:
+                 logger.info(f"Skipping PubSub connection during idle cleanup: {id(conn)}")
+                 connections_to_keep.append(conn)
+                 continue
+ 
              conn_id = id(conn)
              with self._connection_last_use_lock:
                  last_use = self._connection_last_use.get(conn_id, current_time)
@@ -322,11 +334,11 @@
          final_total = len(self._connections)
          final_available = self.pool.qsize()
          final_in_use = final_total - final_available
-         logger.info(
-             f"Idle-connection cleanup finished: before {initial_total} (available: {initial_available}, in use: {initial_in_use}), "
-             f"closed {closed_count}, "
-             f"remaining {final_total} (available: {final_available}, in use: {final_in_use})"
-         )
+         # logger.info(
+         #     f"Idle-connection cleanup finished: before {initial_total} (available: {initial_available}, in use: {initial_in_use}), "
+         #     f"closed {closed_count}, "
+         #     f"remaining {final_total} (available: {final_available}, in use: {final_in_use})"
+         # )
 
      def _stop_idle_cleaner(self):
          """Stop the idle-connection cleanup thread"""
@@ -473,6 +485,13 @@ class AsyncIdleTrackingBlockingConnectionPool(redis.BlockingConnectionPool):
                  connections_to_keep.append(conn)
                  continue
 
+             # Check whether the connection is marked as a PubSub connection
+             # Note: the caller must mark the connection via mark_as_pubsub() when creating a PubSub
+             if hasattr(conn, '_is_pubsub_connection') and conn._is_pubsub_connection:
+                 logger.info(f"Skipping PubSub connection during idle cleanup: {id(conn)}")
+                 connections_to_keep.append(conn)
+                 continue
+ 
              conn_id = id(conn)
              async with self._connection_last_use_lock:
                  last_use = self._connection_last_use.get(conn_id, current_time)
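Both idle cleaners now skip any connection whose _is_pubsub_connection attribute is truthy. The comments say the flag is expected to be set via mark_as_pubsub() when the PubSub is created, but that helper is not defined in this hunk, so the following is only a sketch of what the marking could look like:

# Sketch only: flag a redis-py connection so the idle cleaner leaves it alone.
# The pool checks `_is_pubsub_connection` (see the hunks above); `mark_as_pubsub`
# itself is an assumption, not code shown in this release.
import redis.asyncio as redis

def mark_as_pubsub(connection) -> None:
    connection._is_pubsub_connection = True

async def subscribe(client: redis.Redis, channel: str):
    pubsub = client.pubsub()
    await pubsub.subscribe(channel)
    # After subscribing, the PubSub object holds a dedicated connection;
    # mark it so idle cleanup (max_idle_time) does not close it mid-subscription.
    if pubsub.connection is not None:
        mark_as_pubsub(pubsub.connection)
    return pubsub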
@@ -579,8 +598,8 @@ def get_sync_redis_pool(
      socket_timeout: int = 60,
      timeout: int = 60,
      health_check_interval: int = 30,
-     max_idle_time: int = 120,
-     idle_check_interval: int = 10,
+     max_idle_time: int = 10,
+     idle_check_interval: int = 1,
      **pool_kwargs
  ) -> IdleTrackingBlockingConnectionPool:
      """
@@ -627,8 +646,8 @@
      # Select the pool cache dict
      pool_cache = _sync_redis_pools if decode_responses else _sync_binary_redis_pools
 
-     # Build the cache key (includes the key parameters that affect the connection)
-     cache_key = redis_url
+     # Build the cache key (includes socket_timeout to distinguish timeout configurations)
+     cache_key = f"{redis_url}:socket_timeout={socket_timeout}"
 
      if cache_key not in pool_cache:
          socket_keepalive_options = _get_socket_keepalive_options()
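With socket_timeout folded into the cache key, one Redis URL can map to several pools, one per timeout configuration, instead of the first caller's timeout winning for every later caller. Illustratively (the values are hypothetical):

# Illustration of the new cache keying; values are hypothetical.
url = "redis://localhost:6379/0"
old_key = url                              # before: one shared pool per URL
key_60s = f"{url}:socket_timeout=60"       # pool for ordinary commands
key_none = f"{url}:socket_timeout=None"    # pool for blocking PubSub-style reads
assert key_60s != key_none                 # distinct pools now coexist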
@@ -671,12 +690,12 @@ def get_async_redis_pool(
      decode_responses: bool = True,
      max_connections: int = 200,
      socket_connect_timeout: int = 30,
-     socket_timeout: int = 60,
+     socket_timeout: int | None = None,  # None means wait indefinitely (supports long-lived PubSub connections)
      socket_keepalive: bool = True,
      health_check_interval: int = 30,
      timeout: int = 60,
-     max_idle_time: int = 120,
-     idle_check_interval: int = 10,
+     max_idle_time: int = 10,
+     idle_check_interval: int = 1,
      **pool_kwargs
  ) -> AsyncIdleTrackingBlockingConnectionPool:
      """
@@ -694,7 +713,7 @@
          decode_responses: whether to decode responses to strings
          max_connections: maximum pool size (default 200)
          socket_connect_timeout: socket connect timeout in seconds (default 30)
-         socket_timeout: socket read/write timeout in seconds; default 60, 0 means wait indefinitely
+         socket_timeout: socket read/write timeout in seconds; None means wait indefinitely (supports PubSub), >0 is a concrete timeout
          socket_keepalive: whether to enable socket keepalive
          health_check_interval: health-check interval in seconds, default 30 (30-60 recommended)
          timeout: timeout in seconds when waiting for an available connection (default 60)
@@ -709,8 +728,10 @@
      # Select the pool cache dict
      pool_cache = _async_redis_pools if decode_responses else _async_binary_redis_pools
 
-     # Build the cache key
-     cache_key = redis_url
+     # Build the cache key (includes socket_timeout to distinguish timeout configurations)
+     cache_key = f"{redis_url}:socket_timeout={socket_timeout}"
+ 
+     logger.info(f"get_async_redis_pool called: socket_timeout={socket_timeout}, cache_key={cache_key}, exists={cache_key in pool_cache}")
 
      if cache_key not in pool_cache:
          socket_keepalive_options = _get_socket_keepalive_options()
@@ -736,9 +757,10 @@
          if socket_keepalive and socket_keepalive_options:
              pool_params['socket_keepalive_options'] = socket_keepalive_options
 
-         # Add socket_timeout (0 means wait indefinitely)
-         if socket_timeout > 0:
-             pool_params['socket_timeout'] = socket_timeout
+         # Add socket_timeout
+         # Note: None means wait indefinitely (suited to PubSub); >0 is a concrete timeout
+         # socket_timeout is always set, even when it is None
+         pool_params['socket_timeout'] = socket_timeout
 
          # Merge remaining parameters
          pool_params.update(pool_kwargs)
@@ -829,7 +851,7 @@ def get_async_redis_pool_for_pubsub(
      Get or create an async Redis connection pool dedicated to Pub/Sub
 
      Pub/Sub connections are long-lived and may go a long time without messages, so special settings are used:
-     - socket_timeout=0 (wait indefinitely; never time out just because no messages arrive)
+     - socket_timeout=None (wait indefinitely; never time out just because no messages arrive)
      - max_connections=10 (Pub/Sub needs only a few connections)
      - health_check_interval=60 (actively check connection health every 60 seconds)
 
@@ -848,7 +870,7 @@
          decode_responses=decode_responses,
          max_connections=max_connections,
          socket_connect_timeout=30,
-         socket_timeout=0,  # Wait indefinitely! Never times out just because no messages arrive
+         socket_timeout=None,  # Wait indefinitely! Never times out just because no messages arrive
          socket_keepalive=True,
          health_check_interval=health_check_interval,
          **pool_kwargs
@@ -903,6 +925,85 @@ def get_pg_engine_and_factory(
      return _pg_engines[dsn], _pg_session_factories[dsn]
 
 
+ async def get_asyncpg_pool(
+     dsn: str,
+     min_size: int = 2,
+     max_size: int = 10,
+     command_timeout: float = 60.0,
+     timeout: float = 10.0,
+     max_retries: int = 3,
+     retry_delay: int = 5,
+     **pool_kwargs
+ ) -> asyncpg.Pool:
+     """
+     Get or create an asyncpg connection pool (global singleton)
+ 
+     Args:
+         dsn: PostgreSQL DSN (accepts both postgresql:// and postgresql+asyncpg:// formats)
+         min_size: minimum pool size
+         max_size: maximum pool size
+         command_timeout: command execution timeout (seconds)
+         timeout: connection timeout (seconds)
+         max_retries: maximum number of retries
+         retry_delay: delay between retries (seconds)
+         **pool_kwargs: other pool parameters
+ 
+     Returns:
+         asyncpg.Pool: the asyncpg connection pool
+     """
+     # Convert a SQLAlchemy-style DSN to a standard PostgreSQL DSN
+     # postgresql+asyncpg:// -> postgresql://
+     if dsn and '+asyncpg' in dsn:
+         dsn = dsn.replace('+asyncpg', '')
+ 
+     # Password-masked DSN for logging
+     safe_dsn = _get_safe_pg_dsn(dsn)
+ 
+     if dsn not in _PoolRegistry.asyncpg_pools:
+         # Retry loop
+         for attempt in range(1, max_retries + 1):
+             try:
+                 logger.info(f"Creating asyncpg pool (attempt {attempt}/{max_retries}): {safe_dsn}")
+ 
+                 pool = await asyncpg.create_pool(
+                     dsn,
+                     min_size=min_size,
+                     max_size=max_size,
+                     command_timeout=command_timeout,
+                     timeout=timeout,
+                     **pool_kwargs
+                 )
+ 
+                 _PoolRegistry.asyncpg_pools[dsn] = pool
+                 logger.info(f"asyncpg pool created: {safe_dsn} (min={min_size}, max={max_size})")
+                 break
+ 
+             except Exception as e:
+                 logger.error(f"Failed to create asyncpg pool (attempt {attempt}/{max_retries}): {safe_dsn}, error: {e}")
+ 
+                 if attempt < max_retries:
+                     logger.info(f"Retrying in {retry_delay} seconds...")
+                     await asyncio.sleep(retry_delay)
+                 else:
+                     # Last attempt failed; re-raise
+                     logger.error(f"Failed to create asyncpg pool; retry limit reached ({max_retries})")
+                     raise
+ 
+     return _PoolRegistry.asyncpg_pools[dsn]
+ 
+ 
+ def _get_safe_pg_dsn(dsn: str) -> str:
+     """Return a log-safe DSN with the password masked"""
+     if not dsn:
+         return "None"
+     try:
+         import re
+         # postgresql://user:password@host:port/database -> postgresql://user:***@host:port/database
+         return re.sub(r'://([^:]+):([^@]+)@', r'://\1:***@', dsn)
+     except:
+         return dsn
+ 
+ 
  # ============================================================
  # Section 6: Configuration and connector classes
  # ============================================================
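A short usage sketch for the new helper (the DSN is a placeholder). get_asyncpg_pool() strips the '+asyncpg' driver suffix and caches the pool per DSN, so repeated calls return the same pool:

# Sketch: using the native asyncpg pool (the DSN is a placeholder).
import asyncio
from jettask.db.connector import get_asyncpg_pool

async def main():
    dsn = 'postgresql+asyncpg://user:pass@localhost/jettask'
    pool = await get_asyncpg_pool(dsn)

    async with pool.acquire() as conn:
        row = await conn.fetchrow('SELECT 1 AS ok')
        print(row['ok'])

    # Same DSN -> same cached pool, no reconnect.
    assert pool is await get_asyncpg_pool(dsn)

asyncio.run(main())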
@@ -1519,8 +1620,9 @@ def get_sync_redis_client(
      # Select the client cache
      client_cache = _sync_redis_clients if decode_responses else _sync_binary_redis_clients
 
-     # Build the cache key
-     cache_key = redis_url
+     # Build the cache key (must include socket_timeout to match the pool's cache key)
+     socket_timeout_val = pool_kwargs.get('socket_timeout', 60)  # read socket_timeout, default 60
+     cache_key = f"{redis_url}:socket_timeout={socket_timeout_val}"
 
      if cache_key not in client_cache:
          # Get the connection pool (already a singleton)
@@ -1542,7 +1644,7 @@ def get_async_redis_client(
      redis_url: str,
      decode_responses: bool = True,
      max_connections: int = 1000,
-     socket_timeout: int = 60,
+     socket_timeout: int | None = None,  # None means wait indefinitely, supporting long-lived PubSub connections
      **pool_kwargs
  ) -> redis.StrictRedis:
      """
@@ -1552,7 +1654,7 @@
          redis_url: Redis connection URL
          decode_responses: whether to decode responses to strings
          max_connections: maximum pool size
-         socket_timeout: socket read/write timeout (seconds)
+         socket_timeout: socket read/write timeout (seconds); None means wait indefinitely
          **pool_kwargs: other pool parameters
 
      Returns:
@@ -1565,8 +1667,8 @@
      # Select the client cache
      client_cache = _async_redis_clients if decode_responses else _async_binary_redis_clients
 
-     # Build the cache key
-     cache_key = redis_url
+     # Build the cache key (includes socket_timeout to match the pool's cache key)
+     cache_key = f"{redis_url}:socket_timeout={socket_timeout}"
 
      if cache_key not in client_cache:
          # Get the connection pool (already a singleton)
@@ -1606,10 +1708,12 @@ __all__ = [
      'get_async_redis_pool',
      'get_async_redis_pool_for_pubsub',  # pool dedicated to Pub/Sub
      'get_pg_engine_and_factory',
+     'get_asyncpg_pool',  # native asyncpg pool
 
      # Client instance functions (recommended)
      'get_sync_redis_client',
      'get_async_redis_client',
+     'get_dual_mode_async_redis_client',  # dual-mode client (text + binary)
 
      # Cache cleanup
      'clear_all_cache',
jettask/db/models/__init__.py ADDED
@@ -0,0 +1,16 @@
+ """
+ Database model definitions
+ 
+ SQLAlchemy model definitions for all tables
+ """
+ 
+ from .task import Task
+ from .task_run import TaskRun
+ from .scheduled_task import ScheduledTask, TaskExecutionHistory
+ 
+ __all__ = [
+     'Task',
+     'TaskRun',
+     'ScheduledTask',
+     'TaskExecutionHistory',
+ ]
jettask/db/models/scheduled_task.py ADDED
@@ -0,0 +1,196 @@
+ """
+ ScheduledTask and TaskExecutionHistory models
+ 
+ Map the scheduled_tasks and task_execution_history tables, used for scheduled-task dispatch
+ """
+ from sqlalchemy import (
+     Column, BigInteger, String, Integer, Text, Boolean,
+     TIMESTAMP, Index, Numeric, ForeignKey
+ )
+ from sqlalchemy.dialects.postgresql import JSONB
+ from datetime import datetime
+ from typing import Optional, Dict, Any, List
+ 
+ from ..base import Base
+ 
+ 
+ class ScheduledTask(Base):
+     """
+     Scheduled-tasks table
+ 
+     Scheduled tasks are queue-centric: each one periodically sends a message to its target queue
+     """
+     __tablename__ = 'scheduled_tasks'
+ 
+     # Primary key
+     id = Column(BigInteger, primary_key=True, autoincrement=True, comment='Auto-increment primary key')
+ 
+     # Unique identifier
+     scheduler_id = Column(
+         String(255),
+         nullable=False,
+         unique=True,
+         comment='Unique identifier of the task (used for deduplication)'
+     )
+ 
+     # Task type
+     task_type = Column(
+         String(50),
+         nullable=False,
+         comment='Task type: cron, interval, once'
+     )
+ 
+     # Task execution
+     queue_name = Column(String(100), nullable=False, comment='Target queue name')
+     namespace = Column(String(100), default='default', comment='Namespace')
+     task_args = Column(JSONB, default=[], comment='Task positional arguments')
+     task_kwargs = Column(JSONB, default={}, comment='Task keyword arguments')
+ 
+     # Scheduling
+     cron_expression = Column(String(100), comment='cron expression (used when task_type=cron)')
+     interval_seconds = Column(Numeric(10, 2), comment='Interval in seconds (used when task_type=interval)')
+     next_run_time = Column(TIMESTAMP(timezone=True), comment='Next run time')
+     last_run_time = Column(TIMESTAMP(timezone=True), comment='Last run time')
+ 
+     # State and control
+     enabled = Column(Boolean, default=True, comment='Whether the task is enabled')
+     max_retries = Column(Integer, default=3, comment='Maximum retry count')
+     retry_delay = Column(Integer, default=60, comment='Retry delay (seconds)')
+     timeout = Column(Integer, default=300, comment='Task timeout (seconds)')
+     priority = Column(Integer, comment='Task priority (1 = highest; larger numbers mean lower priority; NULL = default, lowest)')
+ 
+     # Metadata (uses a column-name override to avoid clashing with SQLAlchemy's metadata attribute)
+     description = Column(Text, comment='Task description')
+     tags = Column(JSONB, default=[], comment='Tags')
+     task_metadata = Column('metadata', JSONB, default={}, comment='Extra metadata')
+ 
+     # Timestamps
+     created_at = Column(
+         TIMESTAMP(timezone=True),
+         default=datetime.utcnow,
+         comment='Creation time'
+     )
+     updated_at = Column(
+         TIMESTAMP(timezone=True),
+         default=datetime.utcnow,
+         onupdate=datetime.utcnow,
+         comment='Update time'
+     )
+ 
+     # Indexes
+     __table_args__ = (
+         Index('idx_scheduled_tasks_next_run', 'next_run_time', postgresql_where=(enabled == True)),  # noqa: E712
+         Index('idx_scheduled_tasks_task_type', 'task_type'),
+         Index('idx_scheduled_tasks_queue', 'queue_name'),
+         Index('idx_scheduled_tasks_enabled', 'enabled'),
+         Index('idx_scheduled_tasks_scheduler_id', 'scheduler_id', unique=True),
+     )
+ 
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to a dict"""
+         return {
+             'id': self.id,
+             'scheduler_id': self.scheduler_id,
+             'task_type': self.task_type,
+             'queue_name': self.queue_name,
+             'namespace': self.namespace,
+             'task_args': self.task_args,
+             'task_kwargs': self.task_kwargs,
+             'cron_expression': self.cron_expression,
+             'interval_seconds': float(self.interval_seconds) if self.interval_seconds else None,
+             'next_run_time': self.next_run_time.isoformat() if self.next_run_time else None,
+             'last_run_time': self.last_run_time.isoformat() if self.last_run_time else None,
+             'enabled': self.enabled,
+             'max_retries': self.max_retries,
+             'retry_delay': self.retry_delay,
+             'timeout': self.timeout,
+             'priority': self.priority,
+             'description': self.description,
+             'tags': self.tags,
+             'metadata': self.task_metadata,
+             'created_at': self.created_at.isoformat() if self.created_at else None,
+             'updated_at': self.updated_at.isoformat() if self.updated_at else None,
+         }
+ 
+     def __repr__(self) -> str:
+         return f"<ScheduledTask(id={self.id}, scheduler_id='{self.scheduler_id}', queue='{self.queue_name}', type='{self.task_type}')>"
+ 
+ 
+ class TaskExecutionHistory(Base):
+     """
+     Task execution history table
+ 
+     Records the execution history of scheduled tasks
+     """
+     __tablename__ = 'task_execution_history'
+ 
+     # Primary key
+     id = Column(BigInteger, primary_key=True, autoincrement=True)
+ 
+     # Task association
+     task_id = Column(
+         BigInteger,
+         nullable=False,
+         comment='Associated task ID (foreign key to scheduled_tasks.id)'
+     )
+     event_id = Column(String(255), nullable=False, comment='Execution event ID')
+ 
+     # Execution info
+     scheduled_time = Column(
+         TIMESTAMP(timezone=True),
+         nullable=False,
+         comment='Scheduled execution time'
+     )
+     started_at = Column(TIMESTAMP(timezone=True), comment='Actual start time')
+     finished_at = Column(TIMESTAMP(timezone=True), comment='Finish time')
+ 
+     # Execution result
+     status = Column(
+         String(50),
+         nullable=False,
+         comment='Status: pending, running, success, failed, timeout'
+     )
+     result = Column(JSONB, comment='Execution result')
+     error_message = Column(Text, comment='Error message')
+     retry_count = Column(Integer, default=0, comment='Retry count')
+ 
+     # Performance metrics
+     duration_ms = Column(Integer, comment='Execution duration (milliseconds)')
+     worker_id = Column(String(100), comment='ID of the worker that executed the task')
+ 
+     # Timestamp
+     created_at = Column(
+         TIMESTAMP(timezone=True),
+         default=datetime.utcnow,
+         comment='Creation time'
+     )
+ 
+     # Indexes
+     __table_args__ = (
+         Index('idx_task_history_task_id', 'task_id'),
+         Index('idx_task_history_event_id', 'event_id'),
+         Index('idx_task_history_status', 'status'),
+         Index('idx_task_history_scheduled', 'scheduled_time'),
+         Index('idx_task_history_created', 'created_at'),
+     )
+ 
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to a dict"""
+         return {
+             'id': self.id,
+             'task_id': self.task_id,
+             'event_id': self.event_id,
+             'scheduled_time': self.scheduled_time.isoformat() if self.scheduled_time else None,
+             'started_at': self.started_at.isoformat() if self.started_at else None,
+             'finished_at': self.finished_at.isoformat() if self.finished_at else None,
+             'status': self.status,
+             'result': self.result,
+             'error_message': self.error_message,
+             'retry_count': self.retry_count,
+             'duration_ms': self.duration_ms,
+             'worker_id': self.worker_id,
+             'created_at': self.created_at.isoformat() if self.created_at else None,
+         }
+ 
+     def __repr__(self) -> str:
+         return f"<TaskExecutionHistory(id={self.id}, task_id={self.task_id}, event_id='{self.event_id}', status='{self.status}')>"