jettask 0.2.19__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. jettask/__init__.py +10 -3
  2. jettask/cli.py +314 -228
  3. jettask/config/__init__.py +9 -1
  4. jettask/config/config.py +245 -0
  5. jettask/config/env_loader.py +381 -0
  6. jettask/config/lua_scripts.py +158 -0
  7. jettask/config/nacos_config.py +132 -5
  8. jettask/core/__init__.py +1 -1
  9. jettask/core/app.py +1573 -666
  10. jettask/core/app_importer.py +33 -16
  11. jettask/core/container.py +532 -0
  12. jettask/core/task.py +1 -4
  13. jettask/core/unified_manager_base.py +2 -2
  14. jettask/executor/__init__.py +38 -0
  15. jettask/executor/core.py +625 -0
  16. jettask/executor/executor.py +338 -0
  17. jettask/executor/orchestrator.py +290 -0
  18. jettask/executor/process_entry.py +638 -0
  19. jettask/executor/task_executor.py +317 -0
  20. jettask/messaging/__init__.py +68 -0
  21. jettask/messaging/event_pool.py +2188 -0
  22. jettask/messaging/reader.py +519 -0
  23. jettask/messaging/registry.py +266 -0
  24. jettask/messaging/scanner.py +369 -0
  25. jettask/messaging/sender.py +312 -0
  26. jettask/persistence/__init__.py +118 -0
  27. jettask/persistence/backlog_monitor.py +567 -0
  28. jettask/{backend/data_access.py → persistence/base.py} +58 -57
  29. jettask/persistence/consumer.py +315 -0
  30. jettask/{core → persistence}/db_manager.py +23 -22
  31. jettask/persistence/maintenance.py +81 -0
  32. jettask/persistence/message_consumer.py +259 -0
  33. jettask/{backend/namespace_data_access.py → persistence/namespace.py} +66 -98
  34. jettask/persistence/offline_recovery.py +196 -0
  35. jettask/persistence/queue_discovery.py +215 -0
  36. jettask/persistence/task_persistence.py +218 -0
  37. jettask/persistence/task_updater.py +583 -0
  38. jettask/scheduler/__init__.py +2 -2
  39. jettask/scheduler/loader.py +6 -5
  40. jettask/scheduler/run_scheduler.py +1 -1
  41. jettask/scheduler/scheduler.py +7 -7
  42. jettask/scheduler/{unified_scheduler_manager.py → scheduler_coordinator.py} +18 -13
  43. jettask/task/__init__.py +16 -0
  44. jettask/{router.py → task/router.py} +26 -8
  45. jettask/task/task_center/__init__.py +9 -0
  46. jettask/task/task_executor.py +318 -0
  47. jettask/task/task_registry.py +291 -0
  48. jettask/test_connection_monitor.py +73 -0
  49. jettask/utils/__init__.py +31 -1
  50. jettask/{monitor/run_backlog_collector.py → utils/backlog_collector.py} +1 -1
  51. jettask/utils/db_connector.py +1629 -0
  52. jettask/{db_init.py → utils/db_init.py} +1 -1
  53. jettask/utils/rate_limit/__init__.py +30 -0
  54. jettask/utils/rate_limit/concurrency_limiter.py +665 -0
  55. jettask/utils/rate_limit/config.py +145 -0
  56. jettask/utils/rate_limit/limiter.py +41 -0
  57. jettask/utils/rate_limit/manager.py +269 -0
  58. jettask/utils/rate_limit/qps_limiter.py +154 -0
  59. jettask/utils/rate_limit/task_limiter.py +384 -0
  60. jettask/utils/serializer.py +3 -0
  61. jettask/{monitor/stream_backlog_monitor.py → utils/stream_backlog.py} +14 -6
  62. jettask/utils/time_sync.py +173 -0
  63. jettask/webui/__init__.py +27 -0
  64. jettask/{api/v1 → webui/api}/alerts.py +1 -1
  65. jettask/{api/v1 → webui/api}/analytics.py +2 -2
  66. jettask/{api/v1 → webui/api}/namespaces.py +1 -1
  67. jettask/{api/v1 → webui/api}/overview.py +1 -1
  68. jettask/{api/v1 → webui/api}/queues.py +3 -3
  69. jettask/{api/v1 → webui/api}/scheduled.py +1 -1
  70. jettask/{api/v1 → webui/api}/settings.py +1 -1
  71. jettask/{api.py → webui/app.py} +253 -145
  72. jettask/webui/namespace_manager/__init__.py +10 -0
  73. jettask/{multi_namespace_consumer.py → webui/namespace_manager/multi.py} +69 -22
  74. jettask/{unified_consumer_manager.py → webui/namespace_manager/unified.py} +1 -1
  75. jettask/{run.py → webui/run.py} +2 -2
  76. jettask/{services → webui/services}/__init__.py +1 -3
  77. jettask/{services → webui/services}/overview_service.py +34 -16
  78. jettask/{services → webui/services}/queue_service.py +1 -1
  79. jettask/{backend → webui/services}/queue_stats_v2.py +1 -1
  80. jettask/{services → webui/services}/settings_service.py +1 -1
  81. jettask/worker/__init__.py +53 -0
  82. jettask/worker/lifecycle.py +1507 -0
  83. jettask/worker/manager.py +583 -0
  84. jettask/{core/offline_worker_recovery.py → worker/recovery.py} +268 -175
  85. {jettask-0.2.19.dist-info → jettask-0.2.20.dist-info}/METADATA +2 -71
  86. jettask-0.2.20.dist-info/RECORD +145 -0
  87. jettask/__main__.py +0 -140
  88. jettask/api/__init__.py +0 -103
  89. jettask/backend/__init__.py +0 -1
  90. jettask/backend/api/__init__.py +0 -3
  91. jettask/backend/api/v1/__init__.py +0 -17
  92. jettask/backend/api/v1/monitoring.py +0 -431
  93. jettask/backend/api/v1/namespaces.py +0 -504
  94. jettask/backend/api/v1/queues.py +0 -342
  95. jettask/backend/api/v1/tasks.py +0 -367
  96. jettask/backend/core/__init__.py +0 -3
  97. jettask/backend/core/cache.py +0 -221
  98. jettask/backend/core/database.py +0 -200
  99. jettask/backend/core/exceptions.py +0 -102
  100. jettask/backend/dependencies.py +0 -261
  101. jettask/backend/init_meta_db.py +0 -158
  102. jettask/backend/main.py +0 -1426
  103. jettask/backend/main_unified.py +0 -78
  104. jettask/backend/main_v2.py +0 -394
  105. jettask/backend/models/__init__.py +0 -3
  106. jettask/backend/models/requests.py +0 -236
  107. jettask/backend/models/responses.py +0 -230
  108. jettask/backend/namespace_api_old.py +0 -267
  109. jettask/backend/services/__init__.py +0 -3
  110. jettask/backend/start.py +0 -42
  111. jettask/backend/unified_api_router.py +0 -1541
  112. jettask/cleanup_deprecated_tables.sql +0 -16
  113. jettask/core/consumer_manager.py +0 -1695
  114. jettask/core/delay_scanner.py +0 -256
  115. jettask/core/event_pool.py +0 -1700
  116. jettask/core/heartbeat_process.py +0 -222
  117. jettask/core/task_batch.py +0 -153
  118. jettask/core/worker_scanner.py +0 -271
  119. jettask/executors/__init__.py +0 -5
  120. jettask/executors/asyncio.py +0 -876
  121. jettask/executors/base.py +0 -30
  122. jettask/executors/common.py +0 -148
  123. jettask/executors/multi_asyncio.py +0 -309
  124. jettask/gradio_app.py +0 -570
  125. jettask/integrated_gradio_app.py +0 -1088
  126. jettask/main.py +0 -0
  127. jettask/monitoring/__init__.py +0 -3
  128. jettask/pg_consumer.py +0 -1896
  129. jettask/run_monitor.py +0 -22
  130. jettask/run_webui.py +0 -148
  131. jettask/scheduler/multi_namespace_scheduler.py +0 -294
  132. jettask/scheduler/unified_manager.py +0 -450
  133. jettask/task_center_client.py +0 -150
  134. jettask/utils/serializer_optimized.py +0 -33
  135. jettask/webui_exceptions.py +0 -67
  136. jettask-0.2.19.dist-info/RECORD +0 -150
  137. /jettask/{constants.py → config/constants.py} +0 -0
  138. /jettask/{backend/config.py → config/task_center.py} +0 -0
  139. /jettask/{pg_consumer → messaging/pg_consumer}/pg_consumer_v2.py +0 -0
  140. /jettask/{pg_consumer → messaging/pg_consumer}/sql/add_execution_time_field.sql +0 -0
  141. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_new_tables.sql +0 -0
  142. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_tables_v3.sql +0 -0
  143. /jettask/{pg_consumer → messaging/pg_consumer}/sql/migrate_to_new_structure.sql +0 -0
  144. /jettask/{pg_consumer → messaging/pg_consumer}/sql/modify_time_fields.sql +0 -0
  145. /jettask/{pg_consumer → messaging/pg_consumer}/sql_utils.py +0 -0
  146. /jettask/{models.py → persistence/models.py} +0 -0
  147. /jettask/scheduler/{manager.py → task_crud.py} +0 -0
  148. /jettask/{schema.sql → schemas/schema.sql} +0 -0
  149. /jettask/{task_center.py → task/task_center/client.py} +0 -0
  150. /jettask/{monitoring → utils}/file_watcher.py +0 -0
  151. /jettask/{services/redis_monitor_service.py → utils/redis_monitor.py} +0 -0
  152. /jettask/{api/v1 → webui/api}/__init__.py +0 -0
  153. /jettask/{webui_config.py → webui/config.py} +0 -0
  154. /jettask/{webui_models → webui/models}/__init__.py +0 -0
  155. /jettask/{webui_models → webui/models}/namespace.py +0 -0
  156. /jettask/{services → webui/services}/alert_service.py +0 -0
  157. /jettask/{services → webui/services}/analytics_service.py +0 -0
  158. /jettask/{services → webui/services}/scheduled_task_service.py +0 -0
  159. /jettask/{services → webui/services}/task_service.py +0 -0
  160. /jettask/{webui_sql → webui/sql}/batch_upsert_functions.sql +0 -0
  161. /jettask/{webui_sql → webui/sql}/verify_database.sql +0 -0
  162. {jettask-0.2.19.dist-info → jettask-0.2.20.dist-info}/WHEEL +0 -0
  163. {jettask-0.2.19.dist-info → jettask-0.2.20.dist-info}/entry_points.txt +0 -0
  164. {jettask-0.2.19.dist-info → jettask-0.2.20.dist-info}/licenses/LICENSE +0 -0
  165. {jettask-0.2.19.dist-info → jettask-0.2.20.dist-info}/top_level.txt +0 -0
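Note on import paths (not part of the diff): many of the renames above move modules into new subpackages, so downstream imports of these internal modules change accordingly. A hedged sketch based only on the renames listed above and the imports visible in the app.py diff below; whether these modules are part of the supported public API is not documented here.

    # 0.2.19                                         ->  0.2.20
    # from jettask.core.event_pool import EventPool
    from jettask.messaging.event_pool import EventPool
    # from jettask.router import TaskRouter
    from jettask.task.router import TaskRouter
    # from jettask.task_center import TaskCenter
    from jettask.task.task_center.client import TaskCenter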
jettask/core/app.py CHANGED
@@ -1,28 +1,39 @@
  import os
- import sys
+
  import time
  from datetime import datetime
  from ..utils.serializer import dumps, loads, dumps_str, loads_str
  import signal
- import socket
+
  import asyncio
  import logging
  import contextlib
  import importlib
- import multiprocessing
+ import time
+
  from typing import List
- from collections import defaultdict, deque
+

  import redis
  from redis import asyncio as aioredis
- from watchdog.observers import Observer

+ # 导入TaskMessage
+ from .message import TaskMessage
  from .task import Task
- from .event_pool import EventPool
- from ..executors import AsyncioExecutor, MultiAsyncioExecutor
- from ..monitoring import FileChangeHandler
+ from .enums import TaskStatus
+ from jettask.messaging.event_pool import EventPool
+ from ..executor.orchestrator import ProcessOrchestrator
  from ..utils import gen_task_name
  from ..exceptions import TaskTimeoutError, TaskExecutionError, TaskNotFoundError
+ # 导入统一的数据库连接管理
+ from ..utils.db_connector import get_sync_redis_client, get_async_redis_client
+ # 导入Lua脚本
+ from ..config.lua_scripts import (
+ LUA_SCRIPT_DELAYED_TASKS,
+ LUA_SCRIPT_NORMAL_TASKS,
+ LUA_SCRIPT_SEND_DELAYED_TASKS
+ )
+ import uvloop

  logger = logging.getLogger('app')

@@ -33,251 +44,59 @@ logging.basicConfig(
  datefmt="%Y-%m-%d %H:%M:%S",
  )

- # 尝试导入性能优化库
- try:
- import uvloop
- UVLOOP_AVAILABLE = True
- # 自动启用uvloop
- asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
- logger.debug("Using uvloop for better performance")
- except ImportError:
- UVLOOP_AVAILABLE = False
-
- _on_app_finalizers = set()
-
- # 全局连接池复用
- _redis_pools = {}
- _async_redis_pools = {}
- # 专门用于二进制数据的连接池(用于Stream操作)
- _binary_redis_pools = {}
- _async_binary_redis_pools = {}
-
- def get_redis_pool(redis_url: str, max_connections: int = 200):
- """获取或创建Redis连接池"""
- if redis_url not in _redis_pools:
- # 构建socket keepalive选项,仅在Linux上使用
- socket_keepalive_options = {}
- if hasattr(socket, 'TCP_KEEPIDLE'):
- socket_keepalive_options[socket.TCP_KEEPIDLE] = 1
- if hasattr(socket, 'TCP_KEEPINTVL'):
- socket_keepalive_options[socket.TCP_KEEPINTVL] = 3
- if hasattr(socket, 'TCP_KEEPCNT'):
- socket_keepalive_options[socket.TCP_KEEPCNT] = 5
-
- _redis_pools[redis_url] = redis.ConnectionPool.from_url(
- redis_url,
- decode_responses=True,
- max_connections=max_connections,
- retry_on_timeout=True,
- retry_on_error=[ConnectionError, TimeoutError],
- socket_keepalive=True,
- socket_keepalive_options=socket_keepalive_options if socket_keepalive_options else None,
- health_check_interval=30,
- # 优化超时配置以处理高负载
- socket_connect_timeout=10, # 增加连接超时时间
- socket_timeout=15, # 增加读取超时时间,避免频繁超时
- )
- return _redis_pools[redis_url]
-
- def get_async_redis_pool(redis_url: str, max_connections: int = 200):
- """获取或创建异步Redis连接池"""
- if redis_url not in _async_redis_pools:
- # 构建socket keepalive选项,仅在Linux上使用
- socket_keepalive_options = {}
- if hasattr(socket, 'TCP_KEEPIDLE'):
- socket_keepalive_options[socket.TCP_KEEPIDLE] = 1
- if hasattr(socket, 'TCP_KEEPINTVL'):
- socket_keepalive_options[socket.TCP_KEEPINTVL] = 3
- if hasattr(socket, 'TCP_KEEPCNT'):
- socket_keepalive_options[socket.TCP_KEEPCNT] = 5
-
- _async_redis_pools[redis_url] = aioredis.ConnectionPool.from_url(
- redis_url,
- decode_responses=True,
- max_connections=max_connections,
- retry_on_timeout=True,
- retry_on_error=[ConnectionError, TimeoutError],
- socket_keepalive=True,
- socket_keepalive_options=socket_keepalive_options if socket_keepalive_options else None,
- health_check_interval=30,
- # 优化超时配置以处理高负载
- socket_connect_timeout=10, # 增加连接超时时间
- socket_timeout=15, # 增加读取超时时间,避免频繁超时
- )
- return _async_redis_pools[redis_url]
-
- def get_binary_redis_pool(redis_url: str, max_connections: int = 200):
- """获取或创建用于二进制数据的Redis连接池(Stream操作需要)"""
- if redis_url not in _binary_redis_pools:
- # 构建socket keepalive选项,仅在Linux上使用
- socket_keepalive_options = {}
- if hasattr(socket, 'TCP_KEEPIDLE'):
- socket_keepalive_options[socket.TCP_KEEPIDLE] = 1
- if hasattr(socket, 'TCP_KEEPINTVL'):
- socket_keepalive_options[socket.TCP_KEEPINTVL] = 3
- if hasattr(socket, 'TCP_KEEPCNT'):
- socket_keepalive_options[socket.TCP_KEEPCNT] = 5
-
- _binary_redis_pools[redis_url] = redis.ConnectionPool.from_url(
- redis_url,
- decode_responses=False, # 不解码,因为Stream数据是msgpack二进制
- max_connections=max_connections,
- retry_on_timeout=True,
- retry_on_error=[ConnectionError, TimeoutError],
- socket_keepalive=True,
- socket_keepalive_options=socket_keepalive_options if socket_keepalive_options else None,
- health_check_interval=30,
- # 优化超时配置以处理高负载
- socket_connect_timeout=10, # 增加连接超时时间
- socket_timeout=15, # 增加读取超时时间,避免频繁超时
- )
- return _binary_redis_pools[redis_url]
-
- def get_async_binary_redis_pool(redis_url: str, max_connections: int = 200):
- """获取或创建用于二进制数据的异步Redis连接池(Stream操作需要)"""
- if redis_url not in _async_binary_redis_pools:
- # 构建socket keepalive选项,仅在Linux上使用
- socket_keepalive_options = {}
- if hasattr(socket, 'TCP_KEEPIDLE'):
- socket_keepalive_options[socket.TCP_KEEPIDLE] = 1
- if hasattr(socket, 'TCP_KEEPINTVL'):
- socket_keepalive_options[socket.TCP_KEEPINTVL] = 3
- if hasattr(socket, 'TCP_KEEPCNT'):
- socket_keepalive_options[socket.TCP_KEEPCNT] = 5
-
- _async_binary_redis_pools[redis_url] = aioredis.ConnectionPool.from_url(
- redis_url,
- decode_responses=False, # 不解码,因为Stream数据是msgpack二进制
- max_connections=max_connections,
- retry_on_timeout=True,
- retry_on_error=[ConnectionError, TimeoutError],
- socket_keepalive=True,
- socket_keepalive_options=socket_keepalive_options if socket_keepalive_options else None,
- health_check_interval=30,
- # 优化超时配置以处理高负载
- socket_connect_timeout=10, # 增加连接超时时间
- socket_timeout=15, # 增加读取超时时间,避免频繁超时
- )
- return _async_binary_redis_pools[redis_url]
-
-
- def connect_on_app_finalize(callback):
- """Connect callback to be called when any app is finalized."""
- _on_app_finalizers.add(callback)
- return callback
+ UVLOOP_AVAILABLE = True
+ # 自动启用uvloop
+ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+ logger.debug("Using uvloop for better performance")


  class Jettask(object):
- # Lua脚本定义为类常量,避免重复定义
- _LUA_SCRIPT_DELAYED_TASKS = """
- local prefix = ARGV[1]
- local current_time = tonumber(ARGV[2])
- local results = {}
-
- -- 从ARGV[3]开始,每5个参数为一组任务信息
- -- [stream_key, stream_data, execute_at, delay_seconds, queue]
- for i = 3, #ARGV, 5 do
- local stream_key = ARGV[i]
- local stream_data = ARGV[i+1]
- local execute_at = tonumber(ARGV[i+2])
- local delay_seconds = tonumber(ARGV[i+3])
- local queue = ARGV[i+4]
-
- -- 使用Hash存储所有队列的offset
- local offsets_hash = prefix .. ':QUEUE_OFFSETS'
- -- 使用HINCRBY原子递增offset
- local offset = redis.call('HINCRBY', offsets_hash, queue, 1)
-
- -- 1. 添加消息到Stream(包含offset字段)
- local stream_id = redis.call('XADD', stream_key, '*',
- 'data', stream_data,
- 'offset', offset)
-
- -- 2. 添加到延迟队列ZSET
- local delayed_queue_key = prefix .. ':DELAYED_QUEUE:' .. queue
- redis.call('ZADD', delayed_queue_key, execute_at, stream_id)
-
- -- 3. 设置任务状态Hash(只存储status,其他信息从Stream获取)
- local task_key = prefix .. ':TASK:' .. stream_id
- redis.call('HSET', task_key, 'status', 'delayed')
-
- -- 4. 设置过期时间
- local expire_seconds = math.max(1, math.floor(delay_seconds + 3600))
- redis.call('EXPIRE', task_key, expire_seconds)
-
- -- 保存stream_id到结果
- table.insert(results, stream_id)
- end
-
- return results
- """
-
- _LUA_SCRIPT_NORMAL_TASKS = """
- local prefix = ARGV[1]
- local current_time = ARGV[2]
- local results = {}
-
- -- 从ARGV[3]开始,每2个参数为一组任务信息
- -- [stream_key, stream_data]
- for i = 3, #ARGV, 2 do
- local stream_key = ARGV[i]
- local stream_data = ARGV[i+1]
-
- -- 从stream_key中提取队列名(格式: prefix:STREAM:queue_name)
- local queue_name = string.match(stream_key, prefix .. ':STREAM:(.*)')
-
- -- 获取并递增offset
- local offset_key = prefix .. ':STREAM:' .. queue_name .. ':next_offset'
- local offset = redis.call('INCR', offset_key)
-
- -- 1. 添加消息到Stream(包含offset字段)
- local stream_id = redis.call('XADD', stream_key, '*',
- 'data', stream_data,
- 'offset', offset)
-
- -- 2. 设置任务状态Hash(只存储status)
- local task_key = prefix .. ':TASK:' .. stream_id
- redis.call('HSET', task_key, 'status', 'pending')
-
- -- 3. 设置过期时间(1小时)
- redis.call('EXPIRE', task_key, 3600)
-
- -- 保存stream_id到结果
- table.insert(results, stream_id)
- end
-
- return results
- """
+ # Lua脚本从config模块导入,统一管理
+ _LUA_SCRIPT_DELAYED_TASKS = LUA_SCRIPT_DELAYED_TASKS
+ _LUA_SCRIPT_NORMAL_TASKS = LUA_SCRIPT_NORMAL_TASKS

- def __init__(self, redis_url: str = None, include: list = None, max_connections: int = 200,
+ def __init__(self, redis_url: str = None, include: list = None, max_connections: int = None,
  consumer_strategy: str = None, consumer_config: dict = None, tasks=None,
  redis_prefix: str = None, scheduler_config: dict = None, pg_url: str = None,
- task_center=None) -> None:
+ task_center=None, worker_id: str = None, worker_key: str = None) -> None:
  self._tasks = tasks or {}
  self._queue_tasks = {} # 记录每个队列对应的任务列表
  self.asyncio = False
  self.include = include or []
-
+
  # 任务中心相关属性
  self.task_center = None # 将通过mount_task_center方法挂载或初始化时指定
  self._task_center_config = None
  self._original_redis_url = redis_url
  self._original_pg_url = pg_url
-
- self.redis_url = redis_url
- self.pg_url = pg_url # 存储PostgreSQL URL
- self.max_connections = max_connections
+
+ # 优先使用传入参数,其次使用环境变量
+ self.redis_url = redis_url or os.environ.get('JETTASK_REDIS_URL')
+ self.pg_url = pg_url or os.environ.get('JETTASK_PG_URL')
+ self.max_connections = max_connections if max_connections is not None else int(os.environ.get('JETTASK_MAX_CONNECTIONS', '500'))
+ self.redis_prefix = redis_prefix or os.environ.get('JETTASK_REDIS_PREFIX', 'jettask')
+
+ # 检查必需参数:redis_url
+ if not self.redis_url:
+ raise ValueError(
+ "必须提供 redis_url 参数!\n\n"
+ "请通过以下任一方式配置:\n"
+ " 1. 初始化时传参:\n"
+ " app = Jettask(redis_url='redis://localhost:6379/0')\n\n"
+ " 2. 设置环境变量:\n"
+ " export JETTASK_REDIS_URL='redis://localhost:6379/0'\n\n"
+ " 3. 在 .env 文件中配置:\n"
+ " JETTASK_REDIS_URL=redis://localhost:6379/0\n"
+ )
+
  self.consumer_strategy = consumer_strategy
  self.consumer_config = consumer_config or {}
  self.scheduler_config = scheduler_config or {}
-
- # Redis prefix configuration
- self.redis_prefix = redis_prefix or "jettask"
-
+
  # 如果初始化时提供了task_center,直接挂载
  if task_center:
  self.mount_task_center(task_center)
-
+
  # Update prefixes with the configured prefix using colon namespace
  self.STATUS_PREFIX = f"{self.redis_prefix}:STATUS:"
  self.RESULT_PREFIX = f"{self.redis_prefix}:RESULT:"
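Illustrative note (not part of the diff): the rewritten __init__ above makes redis_url mandatory and adds environment-variable fallbacks. A minimal sketch of both configuration styles, using only the parameter and variable names shown in the diff; importing Jettask from jettask.core.app is an assumption about the package layout.

    import os
    from jettask.core.app import Jettask

    # Style 1: pass the connection settings explicitly
    app = Jettask(redis_url="redis://localhost:6379/0", redis_prefix="myapp")

    # Style 2: rely on the environment-variable fallbacks read in __init__
    os.environ["JETTASK_REDIS_URL"] = "redis://localhost:6379/0"
    os.environ["JETTASK_PG_URL"] = "postgresql://user:pass@localhost:5432/jettask"
    os.environ["JETTASK_MAX_CONNECTIONS"] = "500"  # default used when unset
    app = Jettask()  # raises ValueError if no redis_url can be resolved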
@@ -289,15 +108,33 @@ class Jettask(object):
  # 调度器相关
  self.scheduler = None
  self.scheduler_manager = None
-
+
  self._status_prefix = self.STATUS_PREFIX
  self._result_prefix = self.RESULT_PREFIX
-
+
+ # Worker 状态管理器(延迟初始化)
+ self.worker_state_manager = None
+
+ # Worker 状态查询器(延迟初始化 - 需要 Redis 客户端)
+ self._worker_state = None
+
+ # Worker ID(可选,用于子进程复用主进程的 ID)
+ self.worker_id = worker_id
+ self.worker_key = worker_key
+
  # 初始化清理状态,但不注册处理器
  self._cleanup_done = False
  self._should_exit = False
  self._worker_started = False
  self._handlers_registered = False
+
+ # 初始化队列注册表(用于获取任务名称等操作)
+ from ..messaging.registry import QueueRegistry
+ self.registry = QueueRegistry(
+ redis_client=None, # 延迟初始化,第一次使用时通过 self.redis 获取
+ async_redis_client=None,
+ redis_prefix=self.redis_prefix
+ )


  def _load_config_from_task_center(self):
@@ -343,7 +180,7 @@ class Jettask(object):
  else:
  self.redis_url = f"redis://{redis_host}:{redis_port}/{redis_db}"

- logger.info(f"从任务中心加载Redis配置: {redis_host}:{redis_port}/{redis_db}")
+ logger.debug(f"从任务中心加载Redis配置: {redis_host}:{redis_port}/{redis_db}")

  # 构建PostgreSQL URL
  if pg_config:
@@ -354,7 +191,7 @@ class Jettask(object):
  pg_database = pg_config.get('database', 'jettask')

  self.pg_url = f"postgresql://{pg_user}:{pg_password}@{pg_host}:{pg_port}/{pg_database}"
- logger.info(f"从任务中心加载PostgreSQL配置: {pg_host}:{pg_port}/{pg_database}")
+ logger.debug(f"从任务中心加载PostgreSQL配置: {pg_host}:{pg_port}/{pg_database}")

  # 保存配置供后续使用
  self._task_center_config = config
@@ -392,7 +229,7 @@ class Jettask(object):
  task_center: TaskCenter实例

  使用示例:
- from jettask.task_center import TaskCenter
+ from jettask.task.task_center.client import TaskCenter

  # 创建任务中心客户端(可复用)
  task_center = TaskCenter("http://localhost:8001/api/namespaces/demo")
@@ -456,7 +293,7 @@ class Jettask(object):
  return
  # 只有启动过worker才需要打印清理信息
  if self._worker_started:
- logger.info("Received shutdown signal, cleaning up...")
+ logger.debug("Received shutdown signal, cleaning up...")
  self.cleanup()
  if signum:
  # 设置标记表示需要退出
@@ -485,23 +322,22 @@ class Jettask(object):
  """清理应用资源"""
  if self._cleanup_done:
  return
-
  self._cleanup_done = True

  # 只有真正启动过worker才打印日志
  if self._worker_started:
- logger.info("Cleaning up Jettask resources...")
+

  # 清理EventPool
  if hasattr(self, 'ep') and self.ep:
  self.ep.cleanup()

- logger.info("Jettask cleanup completed")
+
  else:
  # 如果只是实例化但没有启动,静默清理
  if hasattr(self, 'ep') and self.ep:
  self.ep.cleanup()
- logger.debug("Jettask instance cleanup (no worker started)")
+

  @property
  def consumer_manager(self):
@@ -510,35 +346,67 @@ class Jettask(object):

  @property
  def async_redis(self):
- """优化:复用连接池"""
- name = "_async_redis"
- if hasattr(self, name):
- return getattr(self, name)
-
+ """获取异步Redis客户端(全局单例)"""
  # 如果配置了任务中心且还未加载配置,先加载配置
  if self.task_center and self.task_center.is_enabled and not self._task_center_config:
  self._load_config_from_task_center()
-
- pool = get_async_redis_pool(self.redis_url, self.max_connections)
- async_redis = aioredis.StrictRedis(connection_pool=pool)
- setattr(self, name, async_redis)
- return async_redis
+
+ # 使用超长超时时间,支持 Pub/Sub 长连接(可能几天没有消息)
+ return get_async_redis_client(
+ redis_url=self.redis_url,
+ decode_responses=True,
+ max_connections=self.max_connections,
+ socket_timeout=99999999999 # 超长超时,几乎永不超时
+ )

  @property
  def redis(self):
- """优化:复用连接池"""
- name = "_redis"
- if hasattr(self, name):
- return getattr(self, name)
-
+ """获取同步Redis客户端(全局单例)"""
  # 如果配置了任务中心且还未加载配置,先加载配置
  # if self.task_center and self.task_center.is_enabled and not self._task_center_config:
  # self._load_config_from_task_center()
- print(f'{self.redis_url=}')
- pool = get_redis_pool(self.redis_url, self.max_connections)
- redis_cli = redis.StrictRedis(connection_pool=pool)
- setattr(self, name, redis_cli)
- return redis_cli
+
+ return get_sync_redis_client(
+ redis_url=self.redis_url,
+ decode_responses=True,
+ max_connections=self.max_connections
+ )
+
+ @property
+ def binary_redis(self):
+ """获取同步二进制Redis客户端(不自动解码,用于获取msgpack数据)"""
+ return get_sync_redis_client(
+ redis_url=self.redis_url,
+ decode_responses=False,
+ max_connections=self.max_connections
+ )
+
+ @property
+ def async_binary_redis(self):
+ """获取异步二进制Redis客户端(不自动解码,用于获取msgpack数据)"""
+ return get_async_redis_client(
+ redis_url=self.redis_url,
+ decode_responses=False,
+ max_connections=self.max_connections,
+ socket_timeout=99999999999
+ )
+
+ @property
+ def worker_state(self):
+ """
+ 获取 WorkerState 实例(单例,延迟初始化)
+
+ WorkerState 负责 Worker 状态的查询和管理
+ """
+ if self._worker_state is None:
+ from jettask.worker.manager import WorkerState
+ self._worker_state = WorkerState(
+ redis_client=self.redis,
+ async_redis_client=self.async_redis,
+ redis_prefix=self.redis_prefix
+ )
+ logger.debug("Initialized WorkerState for app")
+ return self._worker_state

  @property
  def ep(self):
@@ -549,10 +417,10 @@ class Jettask(object):
  # 传递redis_prefix到consumer_config
  consumer_config = self.consumer_config.copy() if self.consumer_config else {}
  consumer_config['redis_prefix'] = self.redis_prefix
-
+
  ep = EventPool(
- self.redis,
- self.async_redis,
+ self.redis,
+ self.async_redis,
  redis_url=self.redis_url,
  consumer_strategy=self.consumer_strategy,
  consumer_config=consumer_config,
@@ -592,13 +460,13 @@ class Jettask(object):
  self.include += modules

  def _task_from_fun(
- self, fun, name=None, base=None, queue=None, bind=False, retry_config=None, **options
+ self, fun, name=None, base=None, queue=None, bind=False, retry_config=None, rate_limit=None, **options
  ) -> Task:
  name = name or gen_task_name(fun.__name__, fun.__module__)
  base = base or Task
-
+
  # 不再限制队列模式,因为每个task都有独立的consumer group
-
+
  if name not in self._tasks:
  run = staticmethod(fun)
  task: Task = type(
@@ -611,6 +479,7 @@ class Jettask(object):
  "run": run,
  "queue": queue,
  "retry_config": retry_config, # 存储重试配置
+ "rate_limit": rate_limit, # 存储限流配置
  "_decorated": True,
  "__doc__": fun.__doc__,
  "__module__": fun.__module__,
@@ -624,16 +493,67 @@ class Jettask(object):
  with contextlib.suppress(AttributeError):
  task.__qualname__ = fun.__qualname__
  self._tasks[task.name] = task
-
+
  # 记录队列和任务的映射(用于查找)
  if queue:
  if queue not in self._queue_tasks:
  self._queue_tasks[queue] = []
  self._queue_tasks[queue].append(name)
+
+ # 如果任务配置了限流,注册到Redis;否则删除旧配置
+ if rate_limit:
+ # 支持 int (QPS) 和 ConcurrencyLimit/QPSLimit 对象
+ if isinstance(rate_limit, int) and rate_limit > 0:
+ # 简单的 int 值作为 QPS 限制
+ self._register_rate_limit(name, rate_limit)
+ elif hasattr(rate_limit, 'to_dict'):
+ # RateLimitConfig 对象(ConcurrencyLimit 或 QPSLimit)
+ self._register_rate_limit_config(name, rate_limit)
+ else:
+ # 没有限流配置,删除 Redis 中的旧配置(如果存在)
+ from jettask.utils.rate_limit.limiter import RateLimiterManager
+
+ RateLimiterManager.unregister_rate_limit_config(
+ redis_client=self.redis,
+ task_name=name,
+ redis_prefix=self.redis_prefix
+ )
  else:
  task = self._tasks[name]
  return task
-
+
+ def _register_rate_limit(self, task_name: str, qps_limit: int):
+ """注册任务的 QPS 限流规则到 Redis"""
+ from jettask.utils.rate_limit.config import QPSLimit
+ from jettask.utils.rate_limit.limiter import RateLimiterManager
+
+ # 转换为 QPSLimit 配置对象
+ config = QPSLimit(qps=qps_limit)
+ # 调用 limiter.py 中的静态方法
+ RateLimiterManager.register_rate_limit_config(
+ redis_client=self.redis,
+ task_name=task_name,
+ config=config,
+ redis_prefix=self.redis_prefix
+ )
+
+ def _register_rate_limit_config(self, task_name: str, config):
+ """注册任务的限流配置对象到 Redis
+
+ Args:
+ task_name: 任务名称
+ config: RateLimitConfig 对象(ConcurrencyLimit 或 QPSLimit)
+ """
+ from jettask.utils.rate_limit.limiter import RateLimiterManager
+
+ # 调用 limiter.py 中的静态方法
+ RateLimiterManager.register_rate_limit_config(
+ redis_client=self.redis,
+ task_name=task_name,
+ config=config,
+ redis_prefix=self.redis_prefix
+ )
+
  def task(
  self,
  name: str = None,
@@ -644,6 +564,8 @@ class Jettask(object):
  retry_backoff: bool = True, # 是否使用指数退避
  retry_backoff_max: float = 60, # 最大退避时间(秒)
  retry_on_exceptions: tuple = None, # 可重试的异常类型
+ # 限流相关参数
+ rate_limit: int = None, # QPS 限制(每秒允许执行的任务数)
  *args,
  **kwargs,
  ):
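Illustrative note (not part of the diff): the new rate_limit parameter accepts a plain int (treated as a QPS limit) or a config object exposing to_dict(), as handled in _task_from_fun above. A hedged sketch of both forms; QPSLimit(qps=...) is the constructor used in _register_rate_limit, while the decorator call itself is an assumed usage.

    from jettask.utils.rate_limit.config import QPSLimit

    @app.task(queue="email", rate_limit=10)  # plain int: at most ~10 executions per second
    def send_email(to: str):
        ...

    @app.task(queue="email", rate_limit=QPSLimit(qps=5))  # config-object form
    def send_newsletter(to: str):
        ...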
@@ -659,56 +581,113 @@ class Jettask(object):
  # 将异常类转换为类名字符串,以便序列化
  if retry_on_exceptions:
  retry_config['retry_on_exceptions'] = [
- exc if isinstance(exc, str) else exc.__name__
+ exc if isinstance(exc, str) else exc.__name__
  for exc in retry_on_exceptions
  ]
- return self._task_from_fun(fun, name, base, queue, retry_config=retry_config, *args, **kwargs)
+ return self._task_from_fun(fun, name, base, queue, retry_config=retry_config, rate_limit=rate_limit, *args, **kwargs)

  return _create_task_cls

- async def send_tasks(self, messages: list):
+ def include_router(self, router, prefix: str = None):
  """
- 统一的任务发送接口 - 只有这一个发送方法
+ 包含一个TaskRouter,将其所有任务注册到app中

  Args:
- messages: TaskMessage对象列表(或字典列表)
+ router: TaskRouter实例
+ prefix: 额外的前缀(可选)
+ """
+ from ..task.router import TaskRouter
+
+ if not isinstance(router, TaskRouter):
+ raise TypeError(f"Expected TaskRouter, got {type(router)}")
+
+ # 获取router中的所有任务
+ tasks = router.get_tasks()
+
+ for task_name, task_config in tasks.items():
+ # 复制配置,避免修改原始数据
+ config = task_config.copy()

+ # 如果指定了额外前缀,添加到任务名前面
+ if prefix:
+ if config.get('name'):
+ config['name'] = f"{prefix}.{config['name']}"
+ else:
+ config['name'] = f"{prefix}.{task_name}"
+
+ # 获取任务函数和配置
+ func = config.pop('func')
+ name = config.pop('name', task_name)
+ queue = config.pop('queue', None)
+
+ # 提取重试相关参数
+ retry_config = {}
+ if 'max_retries' in config:
+ retry_config['max_retries'] = config.pop('max_retries', 0)
+ if 'retry_delay' in config:
+ retry_config['retry_backoff_max'] = config.pop('retry_delay', 60)
+
+ # 注册任务到app
+ self._task_from_fun(
+ func,
+ name=name,
+ queue=queue,
+ retry_config=retry_config if retry_config else None,
+ **config
+ )
+
+ def send_tasks(self, messages: list, asyncio: bool = False):
+ """
+ 统一的任务发送接口 - 支持同步和异步
+
+ Args:
+ messages: TaskMessage对象列表(或字典列表)
+ asyncio: 是否使用异步模式(默认False)
+
  Returns:
- List[str]: 任务ID列表
-
+ 同步模式: List[str] - 任务ID列表
+ 异步模式: 返回协程,需要使用 await
+
  使用示例:
  from jettask.core.message import TaskMessage
-
- # 发送单个任务(也是用列表)
+
+ # 同步发送
  msg = TaskMessage(
  queue="order_processing",
  args=(12345,),
  kwargs={"customer_id": "C001", "amount": 99.99}
  )
- task_ids = await app.send_tasks([msg])
-
+ task_ids = app.send_tasks([msg])
+
+ # 异步发送
+ task_ids = await app.send_tasks([msg], asyncio=True)
+
  # 批量发送
  messages = [
  TaskMessage(queue="email", kwargs={"to": "user1@example.com"}),
  TaskMessage(queue="email", kwargs={"to": "user2@example.com"}),
  TaskMessage(queue="sms", kwargs={"phone": "123456789"}),
  ]
- task_ids = await app.send_tasks(messages)
-
+ task_ids = app.send_tasks(messages)
+
  # 跨项目发送(不需要task定义)
  messages = [
  TaskMessage(queue="remote_queue", kwargs={"data": "value"})
  ]
- task_ids = await app.send_tasks(messages)
+ task_ids = await app.send_tasks(messages, asyncio=True)
  """
+ if asyncio:
+ return self._send_tasks_async(messages)
+ else:
+ return self._send_tasks_sync(messages)
+
+ def _send_tasks_sync(self, messages: list):
+ """同步发送任务"""
  if not messages:
  return []
-
- # 导入TaskMessage
- from .message import TaskMessage
-
+
  results = []
-
+
  # 按队列分组消息,以便批量处理
  queue_messages = {}
  for msg in messages:
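Illustrative note (not part of the diff): send_tasks is now synchronous by default with an asyncio=True switch, and the grouping code in this hunk routes messages by priority suffix and delay. A sketch of those options; the priority and delay attribute names are taken from this diff, but TaskMessage's full constructor signature is not shown here, so the keyword arguments are an assumption.

    from jettask.core.message import TaskMessage

    messages = [
        TaskMessage(queue="email", kwargs={"to": "a@example.com"}),              # normal
        TaskMessage(queue="email", kwargs={"to": "b@example.com"}, priority=6),  # routed to stream "email:6"
        TaskMessage(queue="email", kwargs={"to": "c@example.com"}, delay=30),    # delayed by 30 seconds
    ]

    task_ids = app.send_tasks(messages)                        # synchronous
    # task_ids = await app.send_tasks(messages, asyncio=True)  # asynchronous form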
@@ -717,10 +696,10 @@ class Jettask(object):
717
696
  msg = TaskMessage.from_dict(msg)
718
697
  elif not isinstance(msg, TaskMessage):
719
698
  raise ValueError(f"Invalid message type: {type(msg)}. Expected TaskMessage or dict")
720
-
699
+
721
700
  # 验证消息
722
701
  msg.validate()
723
-
702
+
724
703
  # 确定实际的队列名(考虑优先级)
725
704
  actual_queue = msg.queue
726
705
  if msg.priority is not None:
@@ -728,32 +707,69 @@ class Jettask(object):
728
707
  actual_queue = f"{msg.queue}:{msg.priority}"
729
708
  # 更新消息体中的queue字段,确保与实际发送的stream key一致
730
709
  msg.queue = actual_queue
731
-
710
+
732
711
  # 按队列分组
733
712
  if actual_queue not in queue_messages:
734
713
  queue_messages[actual_queue] = []
735
714
  queue_messages[actual_queue].append(msg)
736
-
715
+
737
716
  # 处理每个队列的消息
738
717
  for queue, queue_msgs in queue_messages.items():
739
- # 统一使用批量发送,无论是否广播模式
740
- # 广播/单播由消费端的consumer group name决定
741
- batch_results = await self._send_batch_messages(queue, queue_msgs)
718
+ batch_results = self._send_batch_messages_sync(queue, queue_msgs)
742
719
  results.extend(batch_results)
743
-
720
+
721
+ return results
722
+
723
+ async def _send_tasks_async(self, messages: list):
724
+ """异步发送任务"""
725
+ if not messages:
726
+ return []
727
+
728
+ results = []
729
+
730
+ # 按队列分组消息,以便批量处理
731
+ queue_messages = {}
732
+ for msg in messages:
733
+ # 支持TaskMessage对象或字典
734
+ if isinstance(msg, dict):
735
+ msg = TaskMessage.from_dict(msg)
736
+ elif not isinstance(msg, TaskMessage):
737
+ raise ValueError(f"Invalid message type: {type(msg)}. Expected TaskMessage or dict")
738
+
739
+ # 验证消息
740
+ msg.validate()
741
+
742
+ # 确定实际的队列名(考虑优先级)
743
+ actual_queue = msg.queue
744
+ if msg.priority is not None:
745
+ # 将优先级拼接到队列名后面
746
+ actual_queue = f"{msg.queue}:{msg.priority}"
747
+ # 更新消息体中的queue字段,确保与实际发送的stream key一致
748
+ msg.queue = actual_queue
749
+
750
+ # 按队列分组
751
+ if actual_queue not in queue_messages:
752
+ queue_messages[actual_queue] = []
753
+ queue_messages[actual_queue].append(msg)
754
+
755
+ # 处理每个队列的消息
756
+ for queue, queue_msgs in queue_messages.items():
757
+ batch_results = await self._send_batch_messages_async(queue, queue_msgs)
758
+ results.extend(batch_results)
759
+
744
760
  return results
745
761
 
746
- async def _send_batch_messages(self, queue: str, messages: list) -> list:
747
- """批量发送single模式消息(内部方法)"""
762
+ def _send_batch_messages_sync(self, queue: str, messages: list) -> list:
763
+ """批量发送任务(同步)"""
748
764
  from ..utils.serializer import dumps_str
749
-
765
+
750
766
  # 分离普通任务和延迟任务
751
767
  normal_messages = []
752
768
  delayed_messages = []
753
-
769
+
754
770
  for msg in messages:
755
771
  msg_dict = msg.to_dict()
756
-
772
+
757
773
  # 处理延迟任务
758
774
  if msg.delay and msg.delay > 0:
759
775
  # 添加延迟执行标记
@@ -763,9 +779,48 @@ class Jettask(object):
763
779
  delayed_messages.append((msg_dict, msg.delay))
764
780
  else:
765
781
  normal_messages.append(msg_dict)
766
-
782
+
767
783
  results = []
768
-
784
+
785
+ # 发送普通任务(统一使用批量发送)
786
+ if normal_messages:
787
+ batch_results = self.ep._batch_send_event_sync(
788
+ self.ep.get_prefixed_queue_name(queue),
789
+ [{'data': dumps_str(msg)} for msg in normal_messages],
790
+ self.ep.get_redis_client(asyncio=False, binary=True).pipeline()
791
+ )
792
+ results.extend(batch_results)
793
+
794
+ # 发送延迟任务(需要同时添加到DELAYED_QUEUE)
795
+ if delayed_messages:
796
+ delayed_results = self._send_delayed_tasks_sync(queue, delayed_messages)
797
+ results.extend(delayed_results)
798
+
799
+ return results
800
+
801
+ async def _send_batch_messages_async(self, queue: str, messages: list) -> list:
802
+ """批量发送任务(异步)"""
803
+ from ..utils.serializer import dumps_str
804
+
805
+ # 分离普通任务和延迟任务
806
+ normal_messages = []
807
+ delayed_messages = []
808
+
809
+ for msg in messages:
810
+ msg_dict = msg.to_dict()
811
+
812
+ # 处理延迟任务
813
+ if msg.delay and msg.delay > 0:
814
+ # 添加延迟执行标记
815
+ current_time = time.time()
816
+ msg_dict['execute_at'] = current_time + msg.delay
817
+ msg_dict['is_delayed'] = 1
818
+ delayed_messages.append((msg_dict, msg.delay))
819
+ else:
820
+ normal_messages.append(msg_dict)
821
+
822
+ results = []
823
+
769
824
  # 发送普通任务(统一使用批量发送)
770
825
  if normal_messages:
771
826
  batch_results = await self.ep._batch_send_event(
@@ -774,89 +829,799 @@ class Jettask(object):
774
829
  self.ep.get_redis_client(asyncio=True, binary=True).pipeline()
775
830
  )
776
831
  results.extend(batch_results)
777
-
832
+
778
833
  # 发送延迟任务(需要同时添加到DELAYED_QUEUE)
779
834
  if delayed_messages:
780
- delayed_results = await self._send_delayed_tasks(queue, delayed_messages)
835
+ delayed_results = await self._send_delayed_tasks_async(queue, delayed_messages)
781
836
  results.extend(delayed_results)
782
-
837
+
783
838
  return results
784
839
 
785
- async def _send_delayed_tasks(self, queue: str, delayed_messages: list) -> list:
786
- """发送延迟任务到Stream并添加到延迟队列"""
787
- from ..utils.serializer import dumps_str
788
-
789
- # 使用Lua脚本原子性地处理延迟任务
790
- lua_script = """
791
- local prefix = ARGV[1]
792
- local results = {}
793
-
794
- -- 从ARGV[2]开始,每4个参数为一组任务信息
795
- -- [stream_key, stream_data, execute_at, queue]
796
- for i = 2, #ARGV, 4 do
797
- local stream_key = ARGV[i]
798
- local stream_data = ARGV[i+1]
799
- local execute_at = tonumber(ARGV[i+2])
800
- local queue_name = ARGV[i+3]
801
-
802
- -- 使用Hash存储所有队列的offset
803
- local offsets_hash = prefix .. ':QUEUE_OFFSETS'
804
-
805
- -- 从stream_key中提取队列名
806
- local queue_name = string.gsub(stream_key, '^' .. prefix .. ':QUEUE:', '')
807
-
808
- -- 使用HINCRBY原子递增offset
809
- local current_offset = redis.call('HINCRBY', offsets_hash, queue_name, 1)
810
-
811
- -- 1. 添加消息到Stream(包含offset字段)
812
- local stream_id = redis.call('XADD', stream_key, '*',
813
- 'data', stream_data,
814
- 'offset', current_offset)
815
-
816
- -- 2. 添加到延迟队列ZSET
817
- local delayed_queue_key = prefix .. ':DELAYED_QUEUE:' .. queue_name
818
- redis.call('ZADD', delayed_queue_key, execute_at, stream_id)
819
-
820
- -- 3. 设置任务状态Hash
821
- local task_key = prefix .. ':TASK:' .. stream_id
822
- redis.call('HSET', task_key, 'status', 'delayed')
823
- redis.call('EXPIRE', task_key, 3600)
824
-
825
- -- 保存stream_id到结果
826
- table.insert(results, stream_id)
827
- end
828
-
829
- return results
840
+ def _send_delayed_tasks_sync(self, queue: str, delayed_messages: list) -> list:
841
+ """发送延迟任务到Stream并添加到延迟队列(同步)
842
+
843
+ Args:
844
+ queue: 队列名,可能包含优先级后缀(如 "queue_name:6")
845
+ delayed_messages: 延迟消息列表,每项为 (msg_dict, delay_seconds)
846
+
847
+ Note:
848
+ 延迟队列和Stream现在完全对应(包括优先级后缀)。
849
+ Scanner会动态发现所有优先级队列并扫描对应的延迟队列。
830
850
  """
831
-
851
+ from ..utils.serializer import dumps_str
852
+ from ..messaging.registry import QueueRegistry
853
+
854
+ # 注册队列(确保队列在注册表中,Scanner才能发现它)
855
+ registry = QueueRegistry(self.redis, self.async_redis, self.redis_prefix)
856
+
857
+ # 如果队列包含优先级后缀,注册为优先级队列
858
+ if ':' in queue and queue.rsplit(':', 1)[1].isdigit():
859
+ base_queue = queue.rsplit(':', 1)[0]
860
+ priority = int(queue.rsplit(':', 1)[1])
861
+ registry.register_queue_sync(base_queue)
862
+ registry.register_priority_queue_sync(base_queue, priority)
863
+ logger.debug(f"Registered priority queue: {queue} (base: {base_queue}, priority: {priority})")
864
+ else:
865
+ registry.register_queue_sync(queue)
866
+ logger.debug(f"Registered queue: {queue}")
867
+
832
868
  # 准备Lua脚本参数
833
869
  lua_args = [self.redis_prefix]
834
870
  prefixed_queue = self.ep.get_prefixed_queue_name(queue)
835
-
836
- for msg_dict, delay_seconds in delayed_messages:
871
+
872
+ for msg_dict, _ in delayed_messages:
837
873
  stream_data = dumps_str(msg_dict)
838
874
  execute_at = msg_dict['execute_at']
839
-
875
+
876
+ # 延迟队列名和Stream名现在完全对应
840
877
  lua_args.extend([
841
- prefixed_queue,
842
- stream_data,
843
- str(execute_at),
844
- queue
878
+ prefixed_queue, # Stream 键(包含优先级)
879
+ stream_data, # 消息数据
880
+ str(execute_at) # 执行时间
845
881
  ])
846
-
882
+
883
+ # 执行Lua脚本
884
+ client = self.ep.get_redis_client(asyncio=False, binary=True)
885
+
886
+ # 注册Lua脚本(使用config模块中的脚本)
887
+ if not hasattr(self, '_delayed_task_script_sync'):
888
+ self._delayed_task_script_sync = client.register_script(LUA_SCRIPT_SEND_DELAYED_TASKS)
889
+
890
+ # 执行脚本
891
+ results = self._delayed_task_script_sync(keys=[], args=lua_args)
892
+
893
+ # 解码结果
894
+ decoded_results = [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]
895
+ return decoded_results
896
+
897
+ async def _send_delayed_tasks_async(self, queue: str, delayed_messages: list) -> list:
898
+ """发送延迟任务到Stream并添加到延迟队列(异步)
899
+
900
+ Args:
901
+ queue: 队列名,可能包含优先级后缀(如 "queue_name:6")
902
+ delayed_messages: 延迟消息列表,每项为 (msg_dict, delay_seconds)
903
+
904
+ Note:
905
+ 延迟队列和Stream现在完全对应(包括优先级后缀)。
906
+ Scanner会动态发现所有优先级队列并扫描对应的延迟队列。
907
+ """
908
+ from ..utils.serializer import dumps_str
909
+ from ..messaging.registry import QueueRegistry
910
+
911
+ # 注册队列(确保队列在注册表中,Scanner才能发现它)
912
+ registry = QueueRegistry(self.redis, self.async_redis, self.redis_prefix)
913
+
914
+ # 如果队列包含优先级后缀,注册为优先级队列
915
+ if ':' in queue and queue.rsplit(':', 1)[1].isdigit():
916
+ base_queue = queue.rsplit(':', 1)[0]
917
+ priority = int(queue.rsplit(':', 1)[1])
918
+ await registry.register_queue(base_queue)
919
+ await registry.register_priority_queue(base_queue, priority)
920
+ logger.debug(f"Registered priority queue: {queue} (base: {base_queue}, priority: {priority})")
921
+ else:
922
+ await registry.register_queue(queue)
923
+ logger.debug(f"Registered queue: {queue}")
924
+
925
+ # 准备Lua脚本参数
926
+ lua_args = [self.redis_prefix]
927
+ prefixed_queue = self.ep.get_prefixed_queue_name(queue)
928
+
929
+ for msg_dict, _ in delayed_messages:
930
+ stream_data = dumps_str(msg_dict)
931
+ execute_at = msg_dict['execute_at']
932
+
933
+ # 延迟队列名和Stream名现在完全对应
934
+ lua_args.extend([
935
+ prefixed_queue, # Stream 键(包含优先级)
936
+ stream_data, # 消息数据
937
+ str(execute_at) # 执行时间
938
+ ])
939
+
847
940
  # 执行Lua脚本
848
941
  client = self.ep.get_redis_client(asyncio=True, binary=True)
849
-
850
- # 注册Lua脚本
851
- if not hasattr(self, '_delayed_task_script'):
852
- self._delayed_task_script = client.register_script(lua_script)
853
-
942
+
943
+ # 注册Lua脚本(使用config模块中的脚本)
944
+ if not hasattr(self, '_delayed_task_script_async'):
945
+ self._delayed_task_script_async = client.register_script(LUA_SCRIPT_SEND_DELAYED_TASKS)
946
+
854
947
  # 执行脚本
855
- results = await self._delayed_task_script(keys=[], args=lua_args)
856
-
948
+ results = await self._delayed_task_script_async(keys=[], args=lua_args)
949
+
857
950
  # 解码结果
858
- return [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]
859
-
951
+ decoded_results = [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]
952
+ return decoded_results
953
+
954
+ def _get_task_names_from_queue(self, queue: str, task_name: str = None) -> list:
955
+ """获取队列的任务名列表
956
+
957
+ Args:
958
+ queue: 队列名称(可能包含优先级后缀)
959
+ task_name: 可选的任务名,如果提供则直接返回 [task_name]
960
+
961
+ Returns:
962
+ 任务名列表,如果 task_name 提供则返回 [task_name],否则返回队列的所有任务名
963
+ """
964
+ if task_name is not None:
965
+ return [task_name]
966
+
967
+ # 确保 registry 有 redis_client
968
+ if self.registry.redis is None:
969
+ self.registry.redis = self.redis
970
+ self.registry.async_redis = self.async_redis
971
+
972
+ # 从 base_queue 中提取基础队列名(去掉优先级)
973
+ base_queue = queue.split(':')[0] if ':' in queue else queue
974
+ task_names = self.registry.get_task_names_by_queue_sync(base_queue)
975
+
976
+ return list(task_names) if task_names else []
977
+
978
+ def get_result(self, event_id: str, queue: str, task_name: str = None,
979
+ delete: bool = False, asyncio: bool = False,
980
+ delayed_deletion_ex: int = None, wait: bool = False,
981
+ timeout: int = 300, poll_interval: float = 0.5):
982
+ """获取任务执行结果
983
+
984
+ 在任务组架构下,每个任务都有独立的执行结果存储。
985
+ 结果存储格式: {redis_prefix}:TASK:{event_id}:{group_name}
986
+
987
+ 这个方法支持完全解耦的生产者-消费者模式,生产者只需要知道:
988
+ - event_id: 发送任务时返回的事件ID
989
+ - queue: 队列名称
990
+ - task_name: 任务名称(可选,不提供时会获取该队列所有任务的结果)
991
+
992
+ Args:
993
+ event_id: 任务事件ID(发送任务时返回的消息ID)
994
+ queue: 队列名称
995
+ task_name: 任务名称(可选)。如果不提供,会获取该队列所有任务的结果,返回列表
996
+ delete: 是否删除结果(默认False)
997
+ asyncio: 是否使用异步模式(默认False)
998
+ delayed_deletion_ex: 延迟删除时间(秒),设置后会在指定时间后自动删除
999
+ wait: 是否阻塞等待直到任务完成(默认False)
1000
+ timeout: 等待超时时间(秒),默认300秒
1001
+ poll_interval: 轮询间隔(秒),默认0.5秒
1002
+
1003
+ Returns:
1004
+ 当指定task_name时:
1005
+ 同步模式: 任务结果(字符串或字节),如果任务未完成返回None
1006
+ 异步模式: 返回协程,需要使用 await
1007
+ 当不指定task_name时:
1008
+ 返回列表,每个元素是字典: [{"task_name": "xxx", "result": ..., "status": ...}, ...]
1009
+
1010
+ Raises:
1011
+ TaskTimeoutError: 等待超时
1012
+ TaskExecutionError: 任务执行失败
1013
+ TaskNotFoundError: 任务不存在
1014
+
1015
+ Examples:
1016
+ # 获取单个任务结果
1017
+ result = app.get_result("1234567890-0", "my_queue", task_name="my_task")
1018
+
1019
+ # 获取队列中所有任务的结果
1020
+ results = app.get_result("1234567890-0", "my_queue")
1021
+ # 返回: [{"task_name": "task1", "result": ..., "status": ...}, {"task_name": "task2", ...}]
1022
+
1023
+ # 异步获取所有任务结果
1024
+ results = await app.get_result("1234567890-0", "my_queue", asyncio=True)
1025
+ """
1026
+ # 判断是否指定了 task_name,决定最终返回格式
1027
+ return_single = task_name is not None
1028
+
1029
+ # 获取需要查询的任务名列表
1030
+ task_names = self._get_task_names_from_queue(queue, task_name)
1031
+
1032
+ # 如果没有任务,直接返回空列表
1033
+ if not task_names:
1034
+ if asyncio:
1035
+ async def _return_empty_list():
1036
+ return []
1037
+ return _return_empty_list()
1038
+ else:
1039
+ return []
1040
+
1041
+ # 统一处理:遍历所有任务获取结果
1042
+ if asyncio:
1043
+ return self._get_results_async(event_id, queue, task_names, delete,
1044
+ delayed_deletion_ex, wait, timeout, poll_interval, return_single)
1045
+ else:
1046
+ return self._get_results_sync(event_id, queue, task_names, delete,
1047
+ delayed_deletion_ex, wait, timeout, poll_interval, return_single)
1048
+
1049
+ def get_queue_position(self, event_id: str, queue: str, task_name: str = None, asyncio: bool = False):
1050
+ """获取任务在队列中的排队情况
1051
+
1052
+ 通过 event_id 查询任务在队列中的排队位置,包括:
1053
+ - 距离被读取还差多少任务
1054
+ - 距离被消费还差多少任务
1055
+
1056
+ Args:
1057
+ event_id: 任务事件ID(发送任务时返回的消息ID)
1058
+ queue: 队列名称
1059
+ task_name: 任务名称(可选)。如果不提供,会获取该队列所有任务的排队情况
1060
+ asyncio: 是否使用异步模式(默认False)
1061
+
1062
+ Returns:
1063
+ 当指定task_name时:
1064
+ 返回字典: {
1065
+ "task_name": "xxx",
1066
+ "task_offset": 12,
1067
+ "read_offset": 14,
1068
+ "task_ack_offset": 10,
1069
+ "pending_read": 2, # 距离被读取还差2个任务
1070
+ "pending_consume": -2 # 已经被消费了(负数表示已完成)
1071
+ }
1072
+ 当不指定task_name时:
1073
+ 返回列表,每个元素是上述格式的字典
1074
+
1075
+ Note:
1076
+ 排名信息反映的是任务的发送顺序(offset),而不是执行顺序。
1077
+ 在并发执行的场景下,排名靠后的任务可能先执行完成,而排名靠前的任务可能还在执行中。
1078
+
1079
+ 例如:
1080
+ - 任务A (offset=10) 和任务B (offset=15) 同时被读取
1081
+ - 如果任务B执行得快,可能会先完成
1082
+ - 此时任务A的 pending_consume 可能仍为正数(还未消费确认)
1083
+ - 而任务B的 pending_consume 已经变为负数(已完成)
1084
+
1085
+ 因此:
1086
+ - pending_read 表示有多少任务在你之前被发送到队列
1087
+ - pending_consume 表示有多少任务在你之前被消费确认(不代表执行顺序)
1088
+ - 负数的 pending_consume 只表示该任务已被确认,不表示所有前面的任务都已完成
1089
+
1090
+ Examples:
1091
+ # 获取单个任务的排队情况
1092
+ position = app.get_queue_position("1234567890-0", "my_queue", task_name="my_task")
1093
+
1094
+ # 获取队列中所有任务的排队情况
1095
+ positions = app.get_queue_position("1234567890-0", "my_queue")
1096
+
1097
+ # 异步获取
1098
+ position = await app.get_queue_position("1234567890-0", "my_queue", asyncio=True)
1099
+ """
1100
+ # 判断是否指定了 task_name,决定最终返回格式
1101
+ return_single = task_name is not None
1102
+
1103
+ # 获取需要查询的任务名列表
1104
+ task_names = self._get_task_names_from_queue(queue, task_name)
1105
+
1106
+ # 如果没有任务,直接返回空列表
1107
+ if not task_names:
1108
+ if asyncio:
1109
+ async def _return_empty_list():
1110
+ return []
1111
+ return _return_empty_list()
1112
+ else:
1113
+ return []
1114
+
1115
+ # 统一处理:遍历所有任务获取排队情况
1116
+ if asyncio:
1117
+ return self._get_queue_positions_async(event_id, queue, task_names, return_single)
1118
+ else:
1119
+ return self._get_queue_positions_sync(event_id, queue, task_names, return_single)
1120
+
1121
+ def _get_queue_positions_sync(self, event_id: str, queue: str, task_names: list, return_single: bool):
1122
+ """同步获取任务排队情况"""
1123
+ results = []
1124
+
1125
+ # 构建 stream key
1126
+ prefixed_queue = f"{self.redis_prefix}:QUEUE:{queue}"
1127
+
1128
+ # 从 stream 中获取任务数据
1129
+ try:
1130
+ # XRANGE 获取指定 event_id 的消息
1131
+ stream_data = self.binary_redis.xrange(prefixed_queue, min=event_id, max=event_id, count=1)
1132
+
1133
+ if not stream_data:
1134
+ # 任务不存在于 stream 中
1135
+ for task_name in task_names:
1136
+ results.append({
1137
+ "task_name": task_name,
1138
+ "error": "Task not found in stream"
1139
+ })
1140
+ if return_single:
1141
+ return results[0] if results else None
1142
+ return results
1143
+
1144
+ # 解析 stream 数据
1145
+ message_id, message_data = stream_data[0]
1146
+
1147
+ # 解码字段
1148
+ task_offset = None
1149
+ for key, value in message_data.items():
1150
+ if isinstance(key, bytes):
1151
+ key = key.decode('utf-8')
1152
+ if key == 'offset':
1153
+ if isinstance(value, bytes):
1154
+ task_offset = int(value.decode('utf-8'))
1155
+ else:
1156
+ task_offset = int(value)
1157
+ break
1158
+
1159
+ if task_offset is None:
1160
+ for task_name in task_names:
1161
+ results.append({
1162
+ "task_name": task_name,
1163
+ "error": "Offset not found in task data"
1164
+ })
1165
+ if return_single:
1166
+ return results[0] if results else None
1167
+ return results
1168
+
1169
+ except Exception as e:
1170
+ for task_name in task_names:
1171
+ results.append({
1172
+ "task_name": task_name,
1173
+ "error": f"Failed to read from stream: {str(e)}"
1174
+ })
1175
+ if return_single:
1176
+ return results[0] if results else None
1177
+ return results
1178
+
1179
+ # 获取 READ_OFFSETS 和 TASK_OFFSETS(使用 pipeline + HMGET 优化)
1180
+ read_offsets_key = f"{self.redis_prefix}:READ_OFFSETS"
1181
+ task_offsets_key = f"{self.redis_prefix}:TASK_OFFSETS"
1182
+
1183
+ # 提取基础队列名(去掉优先级)
1184
+ base_queue = queue.split(':')[0] if ':' in queue else queue
1185
+
1186
+ # 构建需要查询的字段列表
1187
+ offset_keys = [f"{base_queue}:{task_name}" for task_name in task_names]
1188
+
1189
+ try:
1190
+ # 使用 pipeline 批量获取所有需要的字段
1191
+ pipeline = self.redis.pipeline()
1192
+ pipeline.hmget(read_offsets_key, offset_keys)
1193
+ pipeline.hmget(task_offsets_key, offset_keys)
1194
+ read_offsets_list, task_offsets_list = pipeline.execute()
1195
+ except Exception as e:
1196
+ for task_name in task_names:
1197
+ results.append({
1198
+ "task_name": task_name,
1199
+ "error": f"Failed to read offsets: {str(e)}"
1200
+ })
1201
+ if return_single:
1202
+ return results[0] if results else None
1203
+ return results
1204
+
1205
+ # 对每个任务计算排队情况
1206
+ for idx, task_name in enumerate(task_names):
1207
+ # 获取 read_offset
1208
+ read_offset = read_offsets_list[idx]
1209
+ if read_offset is not None:
1210
+ read_offset = int(read_offset)
1211
+
1212
+ # 获取 task_ack_offset
1213
+ task_ack_offset = task_offsets_list[idx]
1214
+ if task_ack_offset is not None:
1215
+ task_ack_offset = int(task_ack_offset)
1216
+
1217
+ # 计算排队情况
1218
+ # pending_read: 正数表示还差多少个任务才能被读取,0表示刚好被读取,负数表示已被读取
1219
+ pending_read = (task_offset - read_offset) if read_offset is not None else None
1220
+ # pending_consume: 正数表示还差多少个任务才能被消费,0表示刚好被消费,负数表示已被消费
1221
+ pending_consume = (task_offset - task_ack_offset) if task_ack_offset is not None else None
1222
+
1223
+ results.append({
1224
+ "task_name": task_name,
1225
+ "task_offset": task_offset,
1226
+ "read_offset": read_offset,
1227
+ "task_ack_offset": task_ack_offset,
1228
+ "pending_read": pending_read,
1229
+ "pending_consume": pending_consume
1230
+ })
1231
+
1232
+ # 根据 return_single 决定返回格式
1233
+ if return_single:
1234
+ return results[0] if results else None
1235
+ return results
1236
+
1237
+ async def _get_queue_positions_async(self, event_id: str, queue: str, task_names: list, return_single: bool):
1238
+ """异步获取任务排队情况"""
1239
+ results = []
1240
+
1241
+ # 构建 stream key
1242
+ prefixed_queue = f"{self.redis_prefix}:QUEUE:{queue}"
1243
+
1244
+ # 从 stream 中获取任务数据
1245
+ try:
1246
+ # XRANGE 获取指定 event_id 的消息
1247
+ stream_data = await self.async_binary_redis.xrange(prefixed_queue, min=event_id, max=event_id, count=1)
1248
+
1249
+ if not stream_data:
1250
+ # 任务不存在于 stream 中
1251
+ for task_name in task_names:
1252
+ results.append({
1253
+ "task_name": task_name,
1254
+ "error": "Task not found in stream"
1255
+ })
1256
+ if return_single:
1257
+ return results[0] if results else None
1258
+ return results
1259
+
1260
+ # 解析 stream 数据
1261
+ message_id, message_data = stream_data[0]
1262
+
1263
+ # 解码字段
1264
+ task_offset = None
1265
+ for key, value in message_data.items():
1266
+ if isinstance(key, bytes):
1267
+ key = key.decode('utf-8')
1268
+ if key == 'offset':
1269
+ if isinstance(value, bytes):
1270
+ task_offset = int(value.decode('utf-8'))
1271
+ else:
1272
+ task_offset = int(value)
1273
+ break
1274
+
1275
+ if task_offset is None:
1276
+ for task_name in task_names:
1277
+ results.append({
1278
+ "task_name": task_name,
1279
+ "error": "Offset not found in task data"
1280
+ })
1281
+ if return_single:
1282
+ return results[0] if results else None
1283
+ return results
1284
+
1285
+ except Exception as e:
1286
+ for task_name in task_names:
1287
+ results.append({
1288
+ "task_name": task_name,
1289
+ "error": f"Failed to read from stream: {str(e)}"
1290
+ })
1291
+ if return_single:
1292
+ return results[0] if results else None
1293
+ return results
1294
+
1295
+ # 获取 READ_OFFSETS 和 TASK_OFFSETS(使用 pipeline + HMGET 优化)
1296
+ read_offsets_key = f"{self.redis_prefix}:READ_OFFSETS"
1297
+ task_offsets_key = f"{self.redis_prefix}:TASK_OFFSETS"
1298
+
1299
+ # 提取基础队列名(去掉优先级)
1300
+ base_queue = queue.split(':')[0] if ':' in queue else queue
1301
+
1302
+ # 构建需要查询的字段列表
1303
+ offset_keys = [f"{base_queue}:{task_name}" for task_name in task_names]
1304
+
1305
+ try:
1306
+ # 使用 pipeline 批量获取所有需要的字段
1307
+ pipeline = self.async_redis.pipeline()
1308
+ pipeline.hmget(read_offsets_key, offset_keys)
1309
+ pipeline.hmget(task_offsets_key, offset_keys)
1310
+ read_offsets_list, task_offsets_list = await pipeline.execute()
1311
+ except Exception as e:
1312
+ for task_name in task_names:
1313
+ results.append({
1314
+ "task_name": task_name,
1315
+ "error": f"Failed to read offsets: {str(e)}"
1316
+ })
1317
+ if return_single:
1318
+ return results[0] if results else None
1319
+ return results
1320
+
1321
+ # Compute the queue position for each task
1322
+ for idx, task_name in enumerate(task_names):
1323
+ # 获取 read_offset
1324
+ read_offset = read_offsets_list[idx]
1325
+ if read_offset is not None:
1326
+ read_offset = int(read_offset)
1327
+
1328
+ # 获取 task_ack_offset
1329
+ task_ack_offset = task_offsets_list[idx]
1330
+ if task_ack_offset is not None:
1331
+ task_ack_offset = int(task_ack_offset)
1332
+
1333
+ # Compute the queue position
1334
+ # pending_read: positive = tasks still ahead before this one is read, 0 = being read right now, negative = already read
1335
+ pending_read = (task_offset - read_offset) if read_offset is not None else None
1336
+ # pending_consume: positive = tasks still ahead before this one is consumed, 0 = being consumed right now, negative = already consumed
1337
+ pending_consume = (task_offset - task_ack_offset) if task_ack_offset is not None else None
1338
+
1339
+ results.append({
1340
+ "task_name": task_name,
1341
+ "task_offset": task_offset,
1342
+ "read_offset": read_offset,
1343
+ "task_ack_offset": task_ack_offset,
1344
+ "pending_read": pending_read,
1345
+ "pending_consume": pending_consume
1346
+ })
1347
+
1348
+ # Return a single item or the whole list depending on return_single
1349
+ if return_single:
1350
+ return results[0] if results else None
1351
+ return results
1352
+
1353
+ def _build_task_key(self, task_name: str, queue: str, event_id: str):
1354
+ """Build the Redis key information for a task
1355
+
1356
+ Returns:
1357
+ tuple: (group_name, full_key)
1358
+ """
1359
+ prefixed_queue = f"{self.redis_prefix}:QUEUE:{queue}"
1360
+ group_name = f"{prefixed_queue}:{task_name}"
1361
+ status_key = f"{event_id}:{group_name}"
1362
+ full_key = f"{self.redis_prefix}:TASK:{status_key}"
1363
+ return group_name, full_key
1364
+
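For illustration, with an assumed redis_prefix of "jettask", queue "orders", task "send_email" and event id "1700000000000-0", _build_task_key above yields the following layout (all four input values are made-up examples; only the key structure comes from the code):

    group_name = "jettask:QUEUE:orders:send_email"
    full_key   = "jettask:TASK:1700000000000-0:jettask:QUEUE:orders:send_email"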
1365
+ @staticmethod
1366
+ def _decode_bytes(value):
1367
+ """Decode bytes to a string"""
1368
+ if value and isinstance(value, bytes):
1369
+ return value.decode('utf-8')
1370
+ return value
1371
+
1372
+ @staticmethod
1373
+ def _is_task_completed(status):
1374
+ """Check whether the task completed successfully"""
1375
+ return status in [TaskStatus.COMPLETED.value, TaskStatus.SUCCESS.value]
1376
+
1377
+ @staticmethod
1378
+ def _is_task_failed(status):
1379
+ """Check whether the task failed"""
1380
+ return status in [TaskStatus.ERROR.value, TaskStatus.FAILED.value, "ERROR", "FAILED", "error", "failed"]
1381
+
1382
+ def _get_results_sync(self, event_id: str, queue: str, task_names: list,
1383
+ delete: bool, delayed_deletion_ex: int, wait: bool,
1384
+ timeout: int, poll_interval: float, return_single: bool):
1385
+ """Synchronously fetch task results (single or batch)"""
1386
+ results = []
1387
+
1388
+ for task_name in task_names:
1389
+ try:
1390
+ _, full_key = self._build_task_key(task_name, queue, event_id)
1391
+
1392
+ # Always go through _get_result_sync; the wait flag controls the blocking behaviour
1393
+ task_info = self._get_result_sync(full_key, event_id, delete, delayed_deletion_ex,
1394
+ wait, timeout, poll_interval)
1395
+
1396
+ # 如果任务不存在
1397
+ if not task_info:
1398
+ results.append({
1399
+ "task_name": task_name,
1400
+ "status": None,
1401
+ "result": None
1402
+ })
1403
+ else:
1404
+ # 添加 task_name 到结果中
1405
+ task_info["task_name"] = task_name
1406
+ results.append(task_info)
1407
+
1408
+ except Exception as e:
1409
+ results.append({
1410
+ "task_name": task_name,
1411
+ "status": "ERROR",
1412
+ "result": None,
1413
+ "error_msg": str(e)
1414
+ })
1415
+
1416
+ # 根据 return_single 决定返回格式
1417
+ if return_single:
1418
+ return results[0] if results else None
1419
+ return results
1420
+
1421
+ async def _get_results_async(self, event_id: str, queue: str, task_names: list,
1422
+ delete: bool, delayed_deletion_ex: int, wait: bool,
1423
+ timeout: int, poll_interval: float, return_single: bool):
1424
+ """Asynchronously fetch task results (single or batch)"""
1425
+ results = []
1426
+
1427
+ for task_name in task_names:
1428
+ try:
1429
+ _, full_key = self._build_task_key(task_name, queue, event_id)
1430
+
1431
+ # Always go through _get_result_async; the wait flag controls the blocking behaviour
1432
+ task_info = await self._get_result_async(full_key, event_id, delete, delayed_deletion_ex,
1433
+ wait, timeout, poll_interval)
1434
+
1435
+ # 如果任务不存在
1436
+ if not task_info:
1437
+ results.append({
1438
+ "task_name": task_name,
1439
+ "status": TaskStatus.PENDING.value,
1440
+ "result": None
1441
+ })
1442
+ else:
1443
+ # 添加 task_name 到结果中
1444
+ task_info["task_name"] = task_name
1445
+ results.append(task_info)
1446
+
1447
+ except Exception as e:
1448
+ results.append({
1449
+ "task_name": task_name,
1450
+ "status": "ERROR",
1451
+ "result": None,
1452
+ "error_msg": str(e)
1453
+ })
1454
+
1455
+ # 根据 return_single 决定返回格式
1456
+ if return_single:
1457
+ return results[0] if results else None
1458
+ return results
1459
+
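Both batch helpers above return a list of per-task dicts, or a single dict (or None) when return_single is set. A sketch of the shape with placeholder values (the status strings are illustrative; the real ones come from TaskStatus and from whatever the worker wrote into the TASK: hash):

    results = [
        {"task_name": "send_email", "status": "success", "result": "ok"},
        {"task_name": "resize_image", "status": "ERROR", "result": None, "error_msg": "boom"},
    ]
    single = results[0]  # returned instead of the list when return_single=True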
1460
+ def _get_result_sync(self, full_key: str, event_id: str, delete: bool, delayed_deletion_ex: int,
1461
+ wait: bool = False, timeout: int = 300, poll_interval: float = 0.5):
1462
+ """Synchronously fetch a task result (optionally waiting for completion)"""
1463
+ from ..exceptions import TaskTimeoutError, TaskExecutionError, TaskNotFoundError
1464
+
1465
+ # Use the binary client without automatic decoding (the result field is msgpack-serialized)
1466
+ client = self.binary_redis
1467
+ start_time = time.time()
1468
+
1469
+ while True:
1470
+ # 获取整个 hash 的所有字段
1471
+ task_data = client.hgetall(full_key)
1472
+
1473
+ if not task_data:
1474
+ if wait:
1475
+ raise TaskNotFoundError(f"Task {event_id} not found")
1476
+ return None
1477
+
1478
+ # 解码字节字段
1479
+ decoded_data = {}
1480
+ for key, value in task_data.items():
1481
+ # 解码 key
1482
+ if isinstance(key, bytes):
1483
+ key = key.decode('utf-8')
1484
+
1485
+ # 跳过内部标记字段
1486
+ if key.startswith('__'):
1487
+ continue
1488
+
1489
+ # Decode the value - only the result field needs loads_str
1490
+ if isinstance(value, bytes):
1491
+ if key == 'result':
1492
+ try:
1493
+ decoded_data[key] = loads_str(value)
1494
+ except Exception:
1495
+ decoded_data[key] = value
1496
+ else:
1497
+ # 其他字段尝试 UTF-8 解码
1498
+ try:
1499
+ decoded_data[key] = value.decode('utf-8')
1500
+ except Exception:
1501
+ decoded_data[key] = value
1502
+ else:
1503
+ decoded_data[key] = value
1504
+
1505
+ # Not waiting: apply the deletion logic and return immediately
1506
+ if not wait:
1507
+ if delayed_deletion_ex is not None:
1508
+ client.expire(full_key, delayed_deletion_ex)
1509
+ elif delete:
1510
+ if self.task_center and self.task_center.is_enabled:
1511
+ client.hset(full_key, "__pending_delete", "1")
1512
+ else:
1513
+ client.delete(full_key)
1514
+ return decoded_data
1515
+
1516
+ # Waiting mode: inspect the task status
1517
+ status = decoded_data.get('status')
1518
+
1519
+ # 检查任务是否完成
1520
+ if self._is_task_completed(status):
1521
+ # 任务成功完成,处理删除逻辑后返回
1522
+ if delayed_deletion_ex is not None:
1523
+ client.expire(full_key, delayed_deletion_ex)
1524
+ elif delete:
1525
+ if self.task_center and self.task_center.is_enabled:
1526
+ client.hset(full_key, "__pending_delete", "1")
1527
+ else:
1528
+ client.delete(full_key)
1529
+ return decoded_data
1530
+
1531
+ elif self._is_task_failed(status):
1532
+ # 任务失败,抛出异常
1533
+ error_msg = decoded_data.get('error_msg', 'Task execution failed')
1534
+ raise TaskExecutionError(event_id, error_msg)
1535
+
1536
+ # 检查超时
1537
+ if time.time() - start_time > timeout:
1538
+ raise TaskTimeoutError(f"Task {event_id} timed out after {timeout} seconds")
1539
+
1540
+ # 任务仍在执行中,等待后重试
1541
+ time.sleep(poll_interval)
1542
+
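A minimal sketch of driving the synchronous helper above directly, assuming an existing Jettask instance named app and a known queue, task name and event id (these are internal helpers; the public accessor wrapping them is not shown in this hunk):

    _, full_key = app._build_task_key("send_email", "orders", event_id)
    info = app._get_result_sync(
        full_key, event_id,
        delete=False, delayed_deletion_ex=None,
        wait=True, timeout=60, poll_interval=0.5,
    )
    print(info.get("status"), info.get("result"))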
1543
+ async def _get_result_async(self, full_key: str, event_id: str, delete: bool, delayed_deletion_ex: int,
1544
+ wait: bool = False, timeout: int = 300, poll_interval: float = 0.5):
1545
+ """Asynchronously fetch a task result (optionally waiting for completion)"""
1546
+
1547
+ # Use the binary client without automatic decoding (the result field is msgpack-serialized)
1548
+ client = self.async_binary_redis
1549
+ start_time = time.time()
1550
+
1551
+ while True:
1552
+ # 获取整个 hash 的所有字段
1553
+ task_data = await client.hgetall(full_key)
1554
+
1555
+ if not task_data:
1556
+ if wait:
1557
+ raise TaskNotFoundError(f"Task {event_id} not found")
1558
+ return None
1559
+
1560
+ # 解码字节字段
1561
+ decoded_data = {}
1562
+ for key, value in task_data.items():
1563
+ # 解码 key
1564
+ if isinstance(key, bytes):
1565
+ key = key.decode('utf-8')
1566
+
1567
+ # 跳过内部标记字段
1568
+ if key.startswith('__'):
1569
+ continue
1570
+
1571
+ # Decode the value - only the result field needs loads_str
1572
+ if isinstance(value, bytes):
1573
+ if key == 'result':
1574
+ try:
1575
+ decoded_data[key] = loads_str(value)
1576
+ except Exception:
1577
+ decoded_data[key] = value
1578
+ else:
1579
+ # 其他字段尝试 UTF-8 解码
1580
+ try:
1581
+ decoded_data[key] = value.decode('utf-8')
1582
+ except Exception:
1583
+ decoded_data[key] = value
1584
+ else:
1585
+ decoded_data[key] = value
1586
+
1587
+ # Not waiting: apply the deletion logic and return immediately
1588
+ if not wait:
1589
+ if delayed_deletion_ex is not None:
1590
+ await client.expire(full_key, delayed_deletion_ex)
1591
+ elif delete:
1592
+ if self.task_center and self.task_center.is_enabled:
1593
+ await client.hset(full_key, "__pending_delete", "1")
1594
+ else:
1595
+ await client.delete(full_key)
1596
+ return decoded_data
1597
+
1598
+ # Waiting mode: inspect the task status
1599
+ status = decoded_data.get('status')
1600
+
1601
+ # 检查任务是否完成
1602
+ if self._is_task_completed(status):
1603
+ # 任务成功完成,处理删除逻辑后返回
1604
+ if delayed_deletion_ex is not None:
1605
+ await client.expire(full_key, delayed_deletion_ex)
1606
+ elif delete:
1607
+ if self.task_center and self.task_center.is_enabled:
1608
+ await client.hset(full_key, "__pending_delete", "1")
1609
+ else:
1610
+ await client.delete(full_key)
1611
+ return decoded_data
1612
+
1613
+ elif self._is_task_failed(status):
1614
+ # 任务失败,抛出异常
1615
+ error_msg = decoded_data.get('error_msg', 'Task execution failed')
1616
+ raise TaskExecutionError(event_id, error_msg)
1617
+
1618
+ # 检查超时
1619
+ if time.time() - start_time > timeout:
1620
+ raise TaskTimeoutError(f"Task {event_id} timed out after {timeout} seconds")
1621
+
1622
+ # 等待后重试
1623
+ await asyncio.sleep(poll_interval)
1624
+
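The async twin can be awaited concurrently for several event ids. A sketch under the same assumptions as above (existing app instance, known queue and task name):

    import asyncio

    async def wait_all(app, queue, task_name, event_ids):
        async def one(eid):
            _, full_key = app._build_task_key(task_name, queue, eid)
            return await app._get_result_async(
                full_key, eid,
                delete=False, delayed_deletion_ex=None,
                wait=True, timeout=60, poll_interval=0.5,
            )
        return await asyncio.gather(*(one(eid) for eid in event_ids))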
860
1625
  def register_router(self, router, prefix: str = None):
861
1626
  """
862
1627
  Register a task router
@@ -879,7 +1644,7 @@ class Jettask(object):
879
1644
  app = Jettask(redis_url="redis://localhost:6379/0")
880
1645
  app.register_router(email_router)
881
1646
  """
882
- from ..router import TaskRouter
1647
+ from ..task.router import TaskRouter
883
1648
 
884
1649
  if not isinstance(router, TaskRouter):
885
1650
  raise TypeError("router must be a TaskRouter instance")
@@ -899,7 +1664,7 @@ class Jettask(object):
899
1664
 
900
1665
  # 注册任务
901
1666
  task = self._task_from_fun(func, name, None, queue, **task_config)
902
- logger.info(f"Registered task: {name} (queue: {queue or self.redis_prefix})")
1667
+ logger.debug(f"Registered task: {name} (queue: {queue or self.redis_prefix})")
903
1668
 
904
1669
  return self
905
1670
 
@@ -942,25 +1707,159 @@ class Jettask(object):
942
1707
  error_msg += f"3. 或者将这些任务的队列从监听列表中移除\n"
943
1708
  raise ValueError(error_msg)
944
1709
 
945
- def _start(
1710
+
1711
+ def _start_with_heartbeat_thread(
946
1712
  self,
947
- execute_type: str = "asyncio",
1713
+ execute_type: str = "multi_asyncio",
948
1714
  queues: List[str] = None,
949
1715
  concurrency: int = 1,
950
1716
  prefetch_multiplier: int = 1,
951
- **kwargs
952
1717
  ):
1718
+ """Start the executor and the heartbeat thread in the main process"""
1719
+ from jettask.worker.lifecycle import HeartbeatThreadManager
1720
+
1721
+ # 1. Initialize the Worker ID - reuse the HeartbeatConsumerStrategy from the EventPool
1722
+ # Make sure the EventPool has been initialized
1723
+ if not self.ep:
1724
+ raise RuntimeError("EventPool not initialized")
1725
+
1726
+ # 使用 EventPool 中已创建的 HeartbeatConsumerStrategy
1727
+ if not self.consumer_manager or not self.consumer_manager._heartbeat_strategy:
1728
+ raise RuntimeError("ConsumerManager or HeartbeatConsumerStrategy not initialized")
1729
+
1730
+ strategy = self.consumer_manager._heartbeat_strategy
1731
+ strategy._ensure_consumer_id()
1732
+ worker_id = strategy.consumer_id
1733
+
1734
+ logger.info(f"Starting worker {worker_id} in main process (PID: {os.getpid()})")
1735
+
1736
+ # 2. Start the heartbeat thread (in the main process)
1737
+ heartbeat = HeartbeatThreadManager(
1738
+ redis_client=self.redis,
1739
+ worker_key=f"{self.redis_prefix}:WORKER:{worker_id}",
1740
+ worker_id=worker_id,
1741
+ redis_prefix=self.redis_prefix,
1742
+ interval=5.0
1743
+ )
1744
+ heartbeat.start()
1745
+
1746
+ # 保存引用以便清理
1747
+ self._heartbeat_manager = heartbeat
1748
+ self._executor_processes = []
1749
+
1750
+ try:
1751
+ # 3. Start the multi-process executors (delegates directly to _start)
1752
+ self._start(
1753
+ execute_type=execute_type,
1754
+ queues=queues,
1755
+ concurrency=concurrency,
1756
+ prefetch_multiplier=prefetch_multiplier,
1757
+ )
1758
+ except KeyboardInterrupt:
1759
+ logger.info("Received interrupt signal")
1760
+ finally:
1761
+ # 4. Cleanup
1762
+ logger.info("Shutting down worker...")
1763
+
1764
+ # 停止心跳线程
1765
+ if hasattr(self, '_heartbeat_manager'):
1766
+ logger.debug("Stopping heartbeat thread...")
1767
+ self._heartbeat_manager.stop(timeout=2.0)
1768
+
1769
+ # 清理资源
1770
+ self.cleanup()
1771
+
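HeartbeatThreadManager itself lives in jettask.worker.lifecycle and is not part of this hunk. Conceptually the thread only has to refresh the worker hash on a fixed interval; a rough, package-independent sketch (field names and arguments are assumptions, not the real implementation):

    import threading, time

    def heartbeat_loop(redis_client, worker_key, stop_event, interval=5.0):
        # Refresh a liveness timestamp until asked to stop.
        while not stop_event.wait(interval):
            redis_client.hset(worker_key, mapping={"last_heartbeat": time.time()})

    # redis_client and worker_key are assumed to exist in the calling scope.
    stop_event = threading.Event()
    threading.Thread(target=heartbeat_loop,
                     args=(redis_client, worker_key, stop_event),
                     daemon=True).start()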
1772
+ # ==================== New helper methods: refactored worker start-up logic ====================
1773
+
1774
+ def _generate_worker_id_lightweight(self) -> tuple:
1775
+ """
1776
+ Lightweight Worker ID generation (does not initialize the EventPool)
1777
+
1778
+ This method only generates a worker_id; it does not initialize heavyweight components such as the EventPool or ConsumerManager.
1779
+ Used by the main process to generate a worker_id before forking child processes.
1780
+
1781
+ Returns:
1782
+ (worker_id, worker_key) tuple
1783
+ """
1784
+ from jettask.worker.manager import WorkerNaming
1785
+ import asyncio
1786
+ import socket
1787
+
1788
+ # Build the hostname prefix (same logic as HeartbeatConsumerStrategy)
1789
+ try:
1790
+ hostname = socket.gethostname()
1791
+ ip = socket.gethostbyname(hostname)
1792
+ prefix = hostname if hostname != 'localhost' else ip
1793
+ except:
1794
+ prefix = os.environ.get('HOSTNAME', 'unknown')
1795
+
1796
+ # 创建轻量级的 worker naming
1797
+ naming = WorkerNaming()
1798
+
1799
+ # Try to reuse an offline worker ID
1800
+ reusable_id = None
1801
+ try:
1802
+ loop = asyncio.get_event_loop()
1803
+ if loop.is_running():
1804
+ logger.warning("Event loop is running, cannot reuse offline worker ID synchronously")
1805
+ else:
1806
+ # 直接使用 WorkerNaming 查找可复用的 ID
1807
+ reusable_id = loop.run_until_complete(
1808
+ naming.find_reusable_worker_id(
1809
+ prefix=prefix,
1810
+ worker_state=self.worker_state
1811
+ )
1812
+ )
1813
+ except RuntimeError:
1814
+ # 没有事件循环,创建新的
1815
+ loop = asyncio.new_event_loop()
1816
+ asyncio.set_event_loop(loop)
1817
+ try:
1818
+ reusable_id = loop.run_until_complete(
1819
+ naming.find_reusable_worker_id(
1820
+ prefix=prefix,
1821
+ worker_state=self.worker_state
1822
+ )
1823
+ )
1824
+ finally:
1825
+ loop.close()
1826
+
1827
+ # 生成或复用 worker_id
1828
+ if reusable_id:
1829
+ logger.info(f"[PID {os.getpid()}] Reusing offline worker ID: {reusable_id}")
1830
+ worker_id = reusable_id
1831
+ else:
1832
+ worker_id = naming.generate_worker_id(prefix)
1833
+ logger.info(f"[PID {os.getpid()}] Generated new worker ID: {worker_id}")
1834
+
1835
+ worker_key = f"{self.redis_prefix}:WORKER:{worker_id}"
1836
+
1837
+ # 保存到实例
1838
+ self.worker_id = worker_id
1839
+ self.worker_key = worker_key
1840
+
1841
+ return worker_id, worker_key
1842
+
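The worker-ID reuse above has to call an async lookup from synchronous start-up code, so it branches on whether an event loop already exists; _initialize_worker below repeats the same dance. Condensed into one illustrative helper (not part of jettask):

    import asyncio

    def run_sync(coro_fn, *args, **kwargs):
        """Call an async function from sync code; returns None if a loop is already running."""
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                return loop.run_until_complete(coro_fn(*args, **kwargs))
            finally:
                loop.close()
        if loop.is_running():
            return None  # mirror the code above: give up rather than block a live loop
        return loop.run_until_complete(coro_fn(*args, **kwargs))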
1843
+ def _initialize_worker(self, queues: List[str] = None) -> tuple:
1844
+ """
1845
+ Initialize the worker (full version, including EventPool initialization)
1846
+
1847
+ Note: this method initializes the EventPool and ConsumerManager and creates state such as the event loop.
1848
+ In multi-process mode that state is forked into the child processes and must be cleaned up there.
1849
+
1850
+ Returns:
1851
+ (worker_id, worker_key) tuple
1852
+ """
953
1853
  # 设置默认队列
954
1854
  if not queues:
955
1855
  queues = [self.redis_prefix]
956
-
1856
+
1857
+ # 初始化 EventPool
957
1858
  self.ep.queues = queues
958
1859
  self.ep.init_routing()
959
1860
  self._mount_module()
960
- # 验证任务兼容性
961
- self._validate_tasks_for_executor(execute_type, queues)
962
-
963
- # 收集每个队列上的所有任务(用于广播支持)
1861
+
1862
+ # Collect the task list (grouped by queue)
964
1863
  self._tasks_by_queue = {}
965
1864
  for task_name, task in self._tasks.items():
966
1865
  task_queue = task.queue or self.redis_prefix
@@ -968,72 +1867,276 @@ class Jettask(object):
968
1867
  if task_queue not in self._tasks_by_queue:
969
1868
  self._tasks_by_queue[task_queue] = []
970
1869
  self._tasks_by_queue[task_queue].append(task_name)
971
- logger.debug(f"Task {task_name} listening on queue {task_queue}")
972
-
973
- event_queue = deque()
974
-
975
- # 消费者组会在listening_event方法内部自动创建
976
-
977
- # 根据执行器类型创建对应的执行器
978
- if execute_type == "asyncio":
979
- # 对于asyncio执行器,使用asyncio.Queue
980
- async_event_queue = asyncio.Queue()
981
-
982
- async def run_asyncio_executor():
983
- # 启动异步事件监听
984
- asyncio.create_task(self.ep.listening_event(async_event_queue, prefetch_multiplier))
985
- # 创建并运行执行器
986
- executor = AsyncioExecutor(async_event_queue, self, concurrency)
987
- await executor.loop()
988
-
989
- try:
990
- loop = asyncio.get_event_loop()
991
- if loop.is_running():
992
- # 如果事件循环已经在运行,创建一个新的
993
- loop = asyncio.new_event_loop()
994
- asyncio.set_event_loop(loop)
995
- except RuntimeError:
996
- # 如果当前线程没有事件循环,创建一个新的
997
- loop = asyncio.new_event_loop()
998
- asyncio.set_event_loop(loop)
999
-
1000
- try:
1001
- loop.run_until_complete(run_asyncio_executor())
1002
- except RuntimeError as e:
1003
- if "Event loop stopped" in str(e):
1004
- logger.info("Event loop stopped, shutting down gracefully")
1870
+ logger.debug(f"Task {task_name} -> queue {task_queue}")
1871
+
1872
+ # 创建 Worker ID - 复用 EventPool 中的 HeartbeatConsumerStrategy
1873
+ # 确保 EventPool 已初始化
1874
+ if not self.ep:
1875
+ raise RuntimeError("EventPool not initialized")
1876
+
1877
+ # 使用 EventPool 中已创建的 HeartbeatConsumerStrategy
1878
+ if not self.consumer_manager or not self.consumer_manager._heartbeat_strategy:
1879
+ raise RuntimeError("ConsumerManager or HeartbeatConsumerStrategy not initialized")
1880
+
1881
+ strategy = self.consumer_manager._heartbeat_strategy
1882
+
1883
+ # 尝试复用离线的 worker ID
1884
+ import asyncio
1885
+ try:
1886
+ loop = asyncio.get_event_loop()
1887
+ if loop.is_running():
1888
+ # 如果事件循环正在运行,无法同步调用异步方法
1889
+ # 直接生成新的 ID(这种情况通常不会发生)
1890
+ logger.warning("Event loop is running, cannot reuse offline worker ID synchronously")
1891
+ strategy._ensure_consumer_id()
1892
+ else:
1893
+ # 事件循环未运行,可以使用 run_until_complete
1894
+ reusable_id = loop.run_until_complete(self._find_reusable_worker_id_async(strategy))
1895
+ if reusable_id:
1896
+ logger.info(f"[PID {os.getpid()}] Reusing offline worker ID: {reusable_id}")
1897
+ strategy.consumer_id = reusable_id
1898
+ strategy._worker_key = f'{self.redis_prefix}:WORKER:{reusable_id}'
1005
1899
  else:
1006
- raise
1007
- elif execute_type == "multi_asyncio":
1008
- # multi_asyncio在每个子进程中会启动自己的监听器
1009
- executor = MultiAsyncioExecutor(event_queue, self, concurrency)
1010
- executor.prefetch_multiplier = prefetch_multiplier
1011
-
1012
- # 设置信号处理器以正确响应Ctrl+C
1013
- def multi_asyncio_signal_handler(signum, _frame):
1014
- logger.info(f"Multi-asyncio mode received signal {signum}")
1015
- executor._main_received_signal = True
1016
- executor.shutdown_event.set()
1017
- # 强制退出主循环
1018
- raise KeyboardInterrupt()
1019
-
1020
- signal.signal(signal.SIGINT, multi_asyncio_signal_handler)
1021
- signal.signal(signal.SIGTERM, multi_asyncio_signal_handler)
1022
-
1900
+ strategy._ensure_consumer_id()
1901
+ except RuntimeError:
1902
+ # 没有事件循环,创建新的
1903
+ loop = asyncio.new_event_loop()
1904
+ asyncio.set_event_loop(loop)
1023
1905
  try:
1024
- executor.loop()
1025
- except KeyboardInterrupt:
1026
- logger.info("Multi-asyncio mode interrupted")
1906
+ reusable_id = loop.run_until_complete(self._find_reusable_worker_id_async(strategy))
1907
+ if reusable_id:
1908
+ logger.info(f"[PID {os.getpid()}] Reusing offline worker ID: {reusable_id}")
1909
+ strategy.consumer_id = reusable_id
1910
+ strategy._worker_key = f'{self.redis_prefix}:WORKER:{reusable_id}'
1911
+ else:
1912
+ strategy._ensure_consumer_id()
1027
1913
  finally:
1914
+ loop.close()
1915
+
1916
+ worker_id = strategy.consumer_id
1917
+ worker_key = f"{self.redis_prefix}:WORKER:{worker_id}"
1918
+
1919
+ # 保存 worker_id 到实例,供子进程使用
1920
+ self.worker_id = worker_id
1921
+ self.worker_key = worker_key
1922
+
1923
+ logger.info(f"Worker initialized: {worker_id} (PID: {os.getpid()})")
1924
+ return worker_id, worker_key
1925
+
1926
+ async def _find_reusable_worker_id_async(self, strategy) -> str:
1927
+ """
1928
+ Asynchronously look up a reusable offline worker ID
1929
+
1930
+ Args:
1931
+ strategy: HeartbeatConsumerStrategy instance
1932
+
1933
+ Returns:
1934
+ The reusable worker ID, or None if there is none
1935
+ """
1936
+ from jettask.worker.manager import WorkerNaming
1937
+ naming = WorkerNaming()
1938
+ return await naming.find_reusable_worker_id(
1939
+ prefix=strategy.hostname_prefix,
1940
+ worker_state=self.worker_state
1941
+ )
1942
+
1943
+ def _start_heartbeat_thread_v2(self, worker_id: str, worker_key: str, queues: List[str] = None):
1944
+ """
1945
+ Start the heartbeat thread
1946
+
1947
+ Args:
1948
+ worker_id: Worker ID
1949
+ worker_key: Worker Redis key
1950
+ queues: queues this worker is responsible for (used for message recovery)
1951
+
1952
+ Returns:
1953
+ HeartbeatThreadManager instance
1954
+ """
1955
+ from jettask.worker.lifecycle import HeartbeatThreadManager
1956
+
1957
+ heartbeat = HeartbeatThreadManager(
1958
+ redis_client=self.redis,
1959
+ worker_key=worker_key,
1960
+ worker_id=worker_id,
1961
+ redis_prefix=self.redis_prefix,
1962
+ interval=5.0
1963
+ )
1964
+
1965
+ # Set the queues before starting the heartbeat thread (so the very first heartbeat already writes them to Redis)
1966
+ if queues:
1967
+ for queue in queues:
1968
+ heartbeat.queues.add(queue)
1969
+ logger.debug(f"Configured queues for heartbeat: {queues}")
1970
+
1971
+ heartbeat.start()
1972
+ logger.info(f"Heartbeat thread started for worker {worker_id}")
1973
+ return heartbeat
1974
+
1975
+ def _create_executor(self, concurrency: int):
1976
+ """
1977
+ Create the process orchestrator instance
1978
+
1979
+ Returns:
1980
+ ProcessOrchestrator instance
1981
+ """
1982
+ # 创建 ProcessOrchestrator(多进程管理器)
1983
+ orchestrator = ProcessOrchestrator(self, concurrency)
1984
+
1985
+ # 保存 orchestrator 引用
1986
+ self._current_executor = orchestrator
1987
+
1988
+ # 设置信号处理器
1989
+ def signal_handler(signum, frame):
1990
+ logger.info(f"Main process received signal {signum}, initiating shutdown...")
1991
+ self._should_exit = True
1992
+ orchestrator.shutdown_event.set()
1993
+ raise KeyboardInterrupt()
1994
+
1995
+ signal.signal(signal.SIGINT, signal_handler)
1996
+ signal.signal(signal.SIGTERM, signal_handler)
1997
+
1998
+ return orchestrator
1999
+
2000
+ def _cleanup_worker_v3(self, heartbeat_managers: list, executor, worker_ids: list):
2001
+ """
2002
+ Clean up worker resources (new version, supports multiple heartbeat threads)
2003
+ """
2004
+ logger.info("Shutting down workers...")
2005
+
2006
+ # 1. Stop all heartbeat threads
2007
+ if heartbeat_managers:
2008
+ logger.info(f"Stopping {len(heartbeat_managers)} heartbeat threads...")
2009
+ for i, heartbeat in enumerate(heartbeat_managers):
2010
+ try:
2011
+ worker_id = worker_ids[i][0] if i < len(worker_ids) else f"worker_{i}"
2012
+ logger.debug(f"Stopping heartbeat thread for {worker_id}...")
2013
+ heartbeat.stop(timeout=2.0)
2014
+ except Exception as e:
2015
+ logger.error(f"Error stopping heartbeat #{i}: {e}", exc_info=True)
2016
+
2017
+ # 2. Shut down the executor
2018
+ if executor:
2019
+ try:
2020
+ logger.debug("Shutting down executor...")
1028
2021
  executor.shutdown()
1029
- else:
1030
- raise ValueError(f"不支持的执行器类型:{execute_type},仅支持 'asyncio' 'multi_asyncio'")
2022
+ except Exception as e:
2023
+ logger.error(f"Error shutting down executor: {e}", exc_info=True)
2024
+
2025
+ # 3. 关闭 Redis 连接
2026
+ try:
2027
+ logger.debug("Closing Redis connections...")
2028
+ self._cleanup_redis_connections_v2()
2029
+ except Exception as e:
2030
+ logger.error(f"Error closing Redis connections: {e}", exc_info=True)
2031
+
2032
+ # 4. 调用通用清理
2033
+ try:
2034
+ self.cleanup()
2035
+ except Exception as e:
2036
+ logger.error(f"Error in cleanup: {e}", exc_info=True)
2037
+
2038
+ logger.info(f"All {len(worker_ids)} workers shutdown complete")
2039
+
2040
+ def _cleanup_redis_connections_v2(self):
2041
+ """清理 Redis 连接(异步包装)"""
2042
+ async def async_cleanup_redis():
2043
+ """异步关闭 Redis 连接"""
2044
+ try:
2045
+ logger.debug("Closing async Redis connections...")
2046
+
2047
+ # 关闭 EventPool 的连接
2048
+ if hasattr(self.ep, 'async_redis_client'):
2049
+ await self.ep.async_redis_client.aclose()
2050
+
2051
+ if hasattr(self.ep, 'async_binary_redis_client'):
2052
+ await self.ep.async_binary_redis_client.aclose()
2053
+
2054
+ # 关闭 app 级别的连接
2055
+ if hasattr(self, '_async_redis'):
2056
+ await self._async_redis.aclose()
2057
+
2058
+ logger.debug("Async Redis connections closed")
2059
+ except Exception as e:
2060
+ logger.error(f"Error closing async Redis: {e}", exc_info=True)
2061
+
2062
+ # 在新的事件循环中执行异步清理
2063
+ try:
2064
+ import asyncio
2065
+ cleanup_loop = asyncio.new_event_loop()
2066
+ asyncio.set_event_loop(cleanup_loop)
2067
+ cleanup_loop.run_until_complete(async_cleanup_redis())
2068
+ cleanup_loop.close()
2069
+ except Exception as e:
2070
+ logger.error(f"Error in Redis cleanup: {e}", exc_info=True)
2071
+
2072
+
2073
+ def _start(self, queues: List[str] = None, concurrency: int = 1, prefetch_multiplier: int = 1):
2074
+ """
2075
+ 启动 Worker 主逻辑(多进程模式,主进程调用)
2076
+
2077
+ 新架构流程:
2078
+ 1. 为每个子进程生成独立的 Worker ID
2079
+ 2. 在主进程为每个子进程启动独立的心跳线程
2080
+ 3. 创建执行器
2081
+ 4. Fork 并启动子进程,传递对应的 worker_id
2082
+ 5. 等待退出信号(阻塞)
2083
+ 6. 清理资源
2084
+
2085
+ Args:
2086
+ queues: 监听的队列列表
2087
+ concurrency: 并发执行器进程数(子进程数量)
2088
+ prefetch_multiplier: 预取倍数
2089
+ """
2090
+ heartbeat_managers = []
2091
+ executor = None
2092
+ worker_ids = []
2093
+
2094
+ try:
2095
+ # 1. 为每个子进程生成独立的 Worker ID 并启动心跳线程
2096
+ logger.info(f"Generating {concurrency} worker IDs and starting heartbeat threads...")
2097
+ from jettask.worker.lifecycle import HeartbeatThreadManager
2098
+
2099
+ for i in range(concurrency):
2100
+ # 使用新方法:生成 worker_id 并启动心跳,等待首次心跳成功
2101
+ heartbeat = HeartbeatThreadManager.create_and_start(
2102
+ redis_client=self.redis,
2103
+ redis_prefix=self.redis_prefix,
2104
+ queues=queues,
2105
+ interval=5.0,
2106
+ worker_state=self.worker_state
2107
+ )
2108
+ # 从心跳管理器对象中获取 worker_id 和 worker_key
2109
+ worker_ids.append((heartbeat.worker_id, heartbeat.worker_key))
2110
+ heartbeat_managers.append(heartbeat)
2111
+ logger.info(f" Process #{i}: worker_id={heartbeat.worker_id} (heartbeat started)")
2112
+
2113
+ # 2. 创建执行器
2114
+ executor = self._create_executor(concurrency)
2115
+
2116
+ # 3. 启动 ProcessOrchestrator(阻塞调用,会fork多个子进程并运行)
2117
+ # 传递 worker_ids 列表,每个子进程使用对应的 worker_id
2118
+ logger.info(f"Starting {concurrency} executor processes...")
2119
+
2120
+ executor.start(
2121
+ queues=queues,
2122
+ prefetch_multiplier=prefetch_multiplier,
2123
+ worker_ids=worker_ids # 传递 worker_ids 列表
2124
+ )
2125
+
2126
+ except KeyboardInterrupt:
2127
+ logger.info("Worker interrupted by keyboard")
2128
+ except Exception as e:
2129
+ logger.error(f"Error in worker main loop: {e}", exc_info=True)
2130
+ finally:
2131
+ # 5. 清理资源
2132
+ self._cleanup_worker_v3(heartbeat_managers, executor, worker_ids)
2133
+
2134
+ # ==================== 旧方法(待废弃) ====================
1031
2135
 
1032
2136
  def _run_subprocess(self, *args, **kwargs):
1033
- # logger.info("Started Worker Process")
1034
- process = multiprocessing.Process(target=self._start, args=args, kwargs=kwargs)
1035
- process.start()
1036
- return process
2137
+ """Deprecated: subprocesses are no longer used"""
2138
+ _ = (args, kwargs)  # silence unused-argument warnings
2139
+ raise DeprecationWarning("_run_subprocess is deprecated, use _start_with_heartbeat_thread instead")
1037
2140
 
1038
2141
  def start(
1039
2142
  self,
@@ -1043,48 +2146,28 @@ class Jettask(object):
1043
2146
  prefetch_multiplier: int = 1,
1044
2147
  reload: bool = False,
1045
2148
  ):
2149
+ """Start the worker (multi_asyncio only)"""
2150
+ _ = (reload, execute_type)  # deprecated parameters; silence unused warnings
2151
+
1046
2152
  # 标记worker已启动
1047
2153
  self._worker_started = True
1048
-
2154
+
1049
2155
  # 如果配置了任务中心且配置尚未加载,从任务中心获取配置
1050
2156
  if self.task_center and self.task_center.is_enabled and not self._task_center_config:
1051
2157
  self._load_config_from_task_center()
1052
-
2158
+
1053
2159
  # 注册清理处理器(只在启动worker时注册)
1054
2160
  self._setup_cleanup_handlers()
1055
-
1056
- if execute_type == "multi_asyncio" and self.consumer_strategy == "pod":
2161
+
2162
+ if self.consumer_strategy == "pod":
1057
2163
  raise ValueError("multi_asyncio模式下无法使用pod策略")
1058
- self.process = self._run_subprocess(
1059
- execute_type=execute_type,
2164
+
2165
+ # Delegate to the refactored _start() method
2166
+ self._start(
1060
2167
  queues=queues,
1061
2168
  concurrency=concurrency,
1062
2169
  prefetch_multiplier=prefetch_multiplier,
1063
2170
  )
1064
- if reload:
1065
- event_handler = FileChangeHandler(
1066
- self,
1067
- execute_type=execute_type,
1068
- queues=queues,
1069
- concurrency=concurrency,
1070
- prefetch_multiplier=prefetch_multiplier,
1071
- )
1072
- observer = Observer()
1073
- observer.schedule(event_handler, ".", recursive=True)
1074
- observer.start()
1075
- # 使用事件来等待,而不是无限循环
1076
- try:
1077
- while not self._should_exit:
1078
- time.sleep(0.1) # 短暂睡眠,快速响应退出信号
1079
- except KeyboardInterrupt:
1080
- logger.info("Received keyboard interrupt")
1081
- self.cleanup()
1082
- if self.process and self.process.is_alive():
1083
- self.process.terminate()
1084
- self.process.join(timeout=2)
1085
- if self.process.is_alive():
1086
- logger.warning("Process did not terminate, killing...")
1087
- self.process.kill()
1088
2171
 
1089
2172
 
1090
2173
  def get_task_info(self, event_id: str, asyncio: bool = False):
@@ -1097,16 +2180,20 @@ class Jettask(object):
1097
2180
  return client.hgetall(key)
1098
2181
 
1099
2182
  def get_task_status(self, event_id: str, asyncio: bool = False):
1100
- """获取任务状态(从TASK:hash的status字段)"""
2183
+ """Get the task status (from the status field of the TASK: hash)
2184
+
2185
+ Note: this method uses the simplified key format TASK:{event_id}.
2186
+ For statuses keyed with a group_name, use _get_task_status_sync or _get_task_status_async instead.
2187
+ """
1101
2188
  if asyncio:
1102
- return self._get_task_status_async(event_id)
2189
+ return self._get_task_status_simple_async(event_id)
1103
2190
  else:
1104
2191
  client = self.redis
1105
2192
  key = f"{self.redis_prefix}:TASK:{event_id}"
1106
2193
  return client.hget(key, "status")
1107
2194
 
1108
- async def _get_task_status_async(self, event_id: str):
1109
- """异步获取任务状态"""
2195
+ async def _get_task_status_simple_async(self, event_id: str):
2196
+ """Asynchronously get the task status (simplified variant; no task_name or queue required)"""
1110
2197
  key = f"{self.redis_prefix}:TASK:{event_id}"
1111
2198
  return await self.async_redis.hget(key, "status")
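get_task_status doubles as a sync and an async accessor depending on the asyncio flag: the sync path returns the stored status value directly, the async path returns an awaitable. A brief sketch (event_id is a placeholder):

    status = app.get_task_status(event_id)                       # sync call
    status = await app.get_task_status(event_id, asyncio=True)   # inside a coroutine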
1112
2199
 
@@ -1152,199 +2239,18 @@ class Jettask(object):
1152
2239
  def get_redis_client(self, asyncio: bool = False):
1153
2240
  return self.async_redis if asyncio else self.redis
1154
2241
 
1155
- def set_data(
1156
- self, event_id: str, result: str, ex: int = 3600, asyncio: bool = False
1157
- ):
1158
- """设置任务结果(写入TASK:hash的result字段)"""
1159
- client = self.get_redis_client(asyncio)
1160
- key = f"{self.redis_prefix}:TASK:{event_id}"
1161
- if asyncio:
1162
- return self._set_data_async(key, result, ex)
1163
- else:
1164
- client.hset(key, "result", result)
1165
- return client.expire(key, ex)
1166
-
1167
- async def _set_data_async(self, key: str, result: str, ex: int):
1168
- """异步设置任务结果"""
1169
- await self.async_redis.hset(key, "result", result)
1170
- return await self.async_redis.expire(key, ex)
1171
-
1172
2242
  async def get_and_delayed_deletion(self, key: str, ex: int):
1173
2243
  """获取结果并延迟删除(从hash中)"""
1174
2244
  result = await self.async_redis.hget(key, "result")
1175
2245
  await self.async_redis.expire(key, ex)
1176
2246
  return result
1177
-
1178
- async def _get_result_async(self, key: str, delete: bool, delayed_deletion_ex: int):
1179
- """异步获取任务结果"""
1180
- client = self.async_redis
1181
- if delayed_deletion_ex is not None:
1182
- result = await client.hget(key, "result")
1183
- await client.expire(key, delayed_deletion_ex)
1184
- return result
1185
- elif delete:
1186
- # 获取结果并删除整个hash
1187
- result = await client.hget(key, "result")
1188
- await client.delete(key)
1189
- return result
1190
- else:
1191
- return await client.hget(key, "result")
1192
-
1193
- def get_result(self, event_id: str, delete: bool = False, asyncio: bool = False,
1194
- delayed_deletion_ex: int = None, wait: bool = False, timeout: int = 300,
1195
- poll_interval: float = 0.5):
1196
- """获取任务结果(从TASK:hash的result字段)
1197
-
1198
- Args:
1199
- event_id: 任务ID
1200
- delete: 是否删除结果
1201
- asyncio: 是否使用异步模式
1202
- delayed_deletion_ex: 延迟删除时间(秒)
1203
- wait: 是否阻塞等待直到任务完成
1204
- timeout: 等待超时时间(秒),默认300秒
1205
- poll_interval: 轮询间隔(秒),默认0.5秒
1206
- suppress_traceback: 是否抑制框架层堆栈(直接打印错误并退出)
1207
-
1208
- Returns:
1209
- 任务结果字符串
1210
-
1211
- Raises:
1212
- TaskTimeoutError: 等待超时
1213
- TaskExecutionError: 任务执行失败
1214
- TaskNotFoundError: 任务不存在
1215
- """
1216
- if asyncio:
1217
- key = f"{self.redis_prefix}:TASK:{event_id}"
1218
- if wait:
1219
- return self._get_result_async_wait(event_id, key, delete, delayed_deletion_ex, timeout, poll_interval)
1220
- else:
1221
- return self._get_result_async(key, delete, delayed_deletion_ex)
1222
- else:
1223
- # 同步模式
1224
- if wait:
1225
- return self._get_result_sync_wait(event_id, delete, delayed_deletion_ex, timeout, poll_interval)
1226
- else:
1227
- client = self.redis
1228
- key = f"{self.redis_prefix}:TASK:{event_id}"
1229
- if delayed_deletion_ex is not None:
1230
- result = client.hget(key, "result")
1231
- client.expire(key, delayed_deletion_ex)
1232
- return result
1233
- elif delete:
1234
- # 如果配置了任务中心,不删除消息,等任务中心同步后删除
1235
- if self.task_center and self.task_center.is_enabled:
1236
- result = client.hget(key, "result")
1237
- # 仅标记为待删除,不实际删除
1238
- client.hset(key, "__pending_delete", "1")
1239
- return result
1240
- else:
1241
- # 获取结果并删除整个hash
1242
- result = client.hget(key, "result")
1243
- client.delete(key)
1244
- return result
1245
- else:
1246
- # 先尝试从Redis获取
1247
- result = client.hget(key, "result")
1248
- # 如果Redis中没有且配置了任务中心,从任务中心获取
1249
- if result is None and self.task_center_client.is_enabled:
1250
- import asyncio
1251
- loop = asyncio.new_event_loop()
1252
- try:
1253
- task_data = loop.run_until_complete(
1254
- self.task_center_client.get_task_result(event_id)
1255
- )
1256
- if task_data:
1257
- result = task_data.get('result')
1258
- finally:
1259
- loop.close()
1260
- return result
1261
-
1262
- def _get_result_sync_wait(self, event_id: str, delete: bool, delayed_deletion_ex: int,
1263
- timeout: int, poll_interval: float):
1264
- """同步模式下阻塞等待任务结果"""
1265
- start_time = time.time()
1266
-
1267
- while True:
1268
- # 检查超时
1269
- if time.time() - start_time > timeout:
1270
- raise TaskTimeoutError(f"Task {event_id} timed out after {timeout} seconds")
1271
-
1272
- # 获取任务状态
1273
- status = self.get_task_status(event_id)
1274
-
1275
- if status is None:
1276
- raise TaskNotFoundError(f"Task {event_id} not found")
1277
-
1278
- if status == 'success':
1279
- # 任务成功,获取结果
1280
- key = f"{self.redis_prefix}:TASK:{event_id}"
1281
- if delayed_deletion_ex is not None:
1282
- result = self.redis.hget(key, "result")
1283
- self.redis.expire(key, delayed_deletion_ex)
1284
- return result
1285
- elif delete:
1286
- result = self.redis.hget(key, "result")
1287
- self.redis.delete(key)
1288
- return result
1289
- else:
1290
- return self.redis.hget(key, "result")
1291
-
1292
- elif status == 'error':
1293
- # 任务失败,获取错误信息并抛出异常
1294
- key = f"{self.redis_prefix}:TASK:{event_id}"
1295
- # 从 exception 字段获取错误信息
1296
- error_msg = self.redis.hget(key, "exception") or "Task execution failed"
1297
- # 抛出自定义异常
1298
- raise TaskExecutionError(event_id, error_msg)
1299
-
1300
- # 任务还在执行中,继续等待
1301
- time.sleep(poll_interval)
1302
-
1303
- async def _get_result_async_wait(self, event_id: str, key: str, delete: bool,
1304
- delayed_deletion_ex: int, timeout: int, poll_interval: float):
1305
- """异步模式下等待任务结果"""
1306
- start_time = time.time()
1307
-
1308
- while True:
1309
- # 检查超时
1310
- if time.time() - start_time > timeout:
1311
- raise TaskTimeoutError(f"Task {event_id} timed out after {timeout} seconds")
1312
-
1313
- # 获取任务状态
1314
- status = await self._get_task_status_async(event_id)
1315
-
1316
- if status is None:
1317
- raise TaskNotFoundError(f"Task {event_id} not found")
1318
-
1319
- if status == 'success':
1320
- # 任务成功,获取结果
1321
- if delayed_deletion_ex is not None:
1322
- result = await self.async_redis.hget(key, "result")
1323
- await self.async_redis.expire(key, delayed_deletion_ex)
1324
- return result
1325
- elif delete:
1326
- result = await self.async_redis.hget(key, "result")
1327
- await self.async_redis.delete(key)
1328
- return result
1329
- else:
1330
- return await self.async_redis.hget(key, "result")
1331
-
1332
- elif status == 'error':
1333
- # 任务失败,获取错误信息并抛出异常
1334
- # 从 exception 字段获取错误信息
1335
- error_msg = await self.async_redis.hget(key, "exception") or "Task execution failed"
1336
- # 抛出自定义异常
1337
- raise TaskExecutionError(event_id, error_msg)
1338
-
1339
- # 任务还在执行中,继续等待
1340
- await asyncio.sleep(poll_interval)
1341
-
2247
+
1342
2248
  # ==================== Scheduled-task scheduling ====================
1343
2249
 
1344
2250
  async def _ensure_scheduler_initialized(self, db_url: str = None):
1345
2251
  """确保调度器已初始化(内部方法)"""
1346
2252
  if not self.scheduler_manager:
1347
- logger.info("Auto-initializing scheduler...")
2253
+ logger.debug("Auto-initializing scheduler...")
1348
2254
  # 优先使用传入的db_url,然后是实例化时的pg_url,最后是环境变量
1349
2255
  if not db_url:
1350
2256
  db_url = self.pg_url or os.environ.get('JETTASK_PG_URL')
@@ -1356,7 +2262,8 @@ class Jettask(object):
1356
2262
  "Or: export JETTASK_PG_URL='postgresql://user:password@localhost:5432/jettask'"
1357
2263
  )
1358
2264
 
1359
- from ..scheduler import TaskScheduler, ScheduledTaskManager
2265
+ from ..scheduler import TaskScheduler
2266
+ from ..scheduler.task_crud import ScheduledTaskManager
1360
2267
 
1361
2268
  # 创建数据库管理器
1362
2269
  self.scheduler_manager = ScheduledTaskManager(db_url)
@@ -1376,7 +2283,7 @@ class Jettask(object):
1376
2283
  )
1377
2284
 
1378
2285
  await self.scheduler.connect()
1379
- logger.info("Scheduler initialized")
2286
+ logger.debug("Scheduler initialized")
1380
2287
 
1381
2288
  async def start_scheduler(self):
1382
2289
  """启动定时任务调度器(自动初始化)"""
@@ -1393,7 +2300,7 @@ class Jettask(object):
1393
2300
  """停止定时任务调度器"""
1394
2301
  if self.scheduler:
1395
2302
  self.scheduler.stop()
1396
- logger.info("Scheduler stopped")
2303
+ logger.debug("Scheduler stopped")
1397
2304
 
1398
2305
  async def add_scheduled_task(
1399
2306
  self,
@@ -1515,9 +2422,9 @@ class Jettask(object):
1515
2422
  task, created = await self.scheduler_manager.create_or_get_task(task, skip_if_exists=skip_if_exists)
1516
2423
 
1517
2424
  if created:
1518
- logger.info(f"Scheduled task {task.id} created for function {task_name}")
2425
+ logger.debug(f"Scheduled task {task.id} created for function {task_name}")
1519
2426
  else:
1520
- logger.info(f"Scheduled task {task.id} already exists for function {task_name}")
2427
+ logger.debug(f"Scheduled task {task.id} already exists for function {task_name}")
1521
2428
 
1522
2429
  return task
1523
2430
 
@@ -1627,7 +2534,7 @@ class Jettask(object):
1627
2534
  # 批量创建
1628
2535
  created_tasks = await self.scheduler_manager.batch_create_tasks(task_objects, skip_existing)
1629
2536
 
1630
- logger.info(f"Batch created {len(created_tasks)} scheduled tasks")
2537
+ logger.debug(f"Batch created {len(created_tasks)} scheduled tasks")
1631
2538
  return created_tasks
1632
2539
 
1633
2540
  async def bulk_write_scheduled_tasks(self, tasks: list):
@@ -1676,7 +2583,7 @@ class Jettask(object):
1676
2583
  skip_existing = getattr(tasks[0], '_skip_if_exists', True) if tasks else True
1677
2584
  created_tasks = await self.scheduler_manager.batch_create_tasks(task_objects, skip_existing)
1678
2585
 
1679
- logger.info(f"Bulk wrote {len(created_tasks)} scheduled tasks")
2586
+ logger.debug(f"Bulk wrote {len(created_tasks)} scheduled tasks")
1680
2587
  return created_tasks
1681
2588
 
1682
2589
  async def list_scheduled_tasks(self, **filters):
@@ -1709,7 +2616,7 @@ class Jettask(object):
1709
2616
  if self.scheduler:
1710
2617
  await self.scheduler.loader.remove_task(task.id)
1711
2618
 
1712
- logger.info(f"Task {task.id} (scheduler_id: {task.scheduler_id}) disabled")
2619
+ logger.debug(f"Task {task.id} (scheduler_id: {task.scheduler_id}) disabled")
1713
2620
  return True
1714
2621
  return False
1715
2622
 
@@ -1730,7 +2637,7 @@ class Jettask(object):
1730
2637
  if self.scheduler:
1731
2638
  await self.scheduler.loader.load_tasks()
1732
2639
 
1733
- logger.info(f"Task {task.id} (scheduler_id: {task.scheduler_id}) enabled")
2640
+ logger.debug(f"Task {task.id} (scheduler_id: {task.scheduler_id}) enabled")
1734
2641
  return True
1735
2642
  return False
1736
2643