jettask 0.2.1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff shows the contents of publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only and reflects the changes between those published versions.
Files changed (89)
  1. jettask/constants.py +213 -0
  2. jettask/core/app.py +525 -205
  3. jettask/core/cli.py +193 -185
  4. jettask/core/consumer_manager.py +126 -34
  5. jettask/core/context.py +3 -0
  6. jettask/core/enums.py +137 -0
  7. jettask/core/event_pool.py +501 -168
  8. jettask/core/message.py +147 -0
  9. jettask/core/offline_worker_recovery.py +181 -114
  10. jettask/core/task.py +10 -174
  11. jettask/core/task_batch.py +153 -0
  12. jettask/core/unified_manager_base.py +243 -0
  13. jettask/core/worker_scanner.py +54 -54
  14. jettask/executors/asyncio.py +184 -64
  15. jettask/webui/backend/config.py +51 -0
  16. jettask/webui/backend/data_access.py +2083 -92
  17. jettask/webui/backend/data_api.py +3294 -0
  18. jettask/webui/backend/dependencies.py +261 -0
  19. jettask/webui/backend/init_meta_db.py +158 -0
  20. jettask/webui/backend/main.py +1358 -69
  21. jettask/webui/backend/main_unified.py +78 -0
  22. jettask/webui/backend/main_v2.py +394 -0
  23. jettask/webui/backend/namespace_api.py +295 -0
  24. jettask/webui/backend/namespace_api_old.py +294 -0
  25. jettask/webui/backend/namespace_data_access.py +611 -0
  26. jettask/webui/backend/queue_backlog_api.py +727 -0
  27. jettask/webui/backend/queue_stats_v2.py +521 -0
  28. jettask/webui/backend/redis_monitor_api.py +476 -0
  29. jettask/webui/backend/unified_api_router.py +1601 -0
  30. jettask/webui/db_init.py +204 -32
  31. jettask/webui/frontend/package-lock.json +492 -1
  32. jettask/webui/frontend/package.json +4 -1
  33. jettask/webui/frontend/src/App.css +105 -7
  34. jettask/webui/frontend/src/App.jsx +49 -20
  35. jettask/webui/frontend/src/components/NamespaceSelector.jsx +166 -0
  36. jettask/webui/frontend/src/components/QueueBacklogChart.jsx +298 -0
  37. jettask/webui/frontend/src/components/QueueBacklogTrend.jsx +638 -0
  38. jettask/webui/frontend/src/components/QueueDetailsTable.css +65 -0
  39. jettask/webui/frontend/src/components/QueueDetailsTable.jsx +487 -0
  40. jettask/webui/frontend/src/components/QueueDetailsTableV2.jsx +465 -0
  41. jettask/webui/frontend/src/components/ScheduledTaskFilter.jsx +423 -0
  42. jettask/webui/frontend/src/components/TaskFilter.jsx +425 -0
  43. jettask/webui/frontend/src/components/TimeRangeSelector.css +21 -0
  44. jettask/webui/frontend/src/components/TimeRangeSelector.jsx +160 -0
  45. jettask/webui/frontend/src/components/layout/AppLayout.css +95 -0
  46. jettask/webui/frontend/src/components/layout/AppLayout.jsx +49 -0
  47. jettask/webui/frontend/src/components/layout/Header.css +34 -10
  48. jettask/webui/frontend/src/components/layout/Header.jsx +31 -23
  49. jettask/webui/frontend/src/components/layout/SideMenu.css +137 -0
  50. jettask/webui/frontend/src/components/layout/SideMenu.jsx +209 -0
  51. jettask/webui/frontend/src/components/layout/TabsNav.css +244 -0
  52. jettask/webui/frontend/src/components/layout/TabsNav.jsx +206 -0
  53. jettask/webui/frontend/src/components/layout/UserInfo.css +197 -0
  54. jettask/webui/frontend/src/components/layout/UserInfo.jsx +197 -0
  55. jettask/webui/frontend/src/contexts/NamespaceContext.jsx +72 -0
  56. jettask/webui/frontend/src/contexts/TabsContext.backup.jsx +245 -0
  57. jettask/webui/frontend/src/main.jsx +1 -0
  58. jettask/webui/frontend/src/pages/Alerts.jsx +684 -0
  59. jettask/webui/frontend/src/pages/Dashboard.jsx +1330 -0
  60. jettask/webui/frontend/src/pages/QueueDetail.jsx +1109 -10
  61. jettask/webui/frontend/src/pages/QueueMonitor.jsx +236 -115
  62. jettask/webui/frontend/src/pages/Queues.jsx +5 -1
  63. jettask/webui/frontend/src/pages/ScheduledTasks.jsx +809 -0
  64. jettask/webui/frontend/src/pages/Settings.jsx +800 -0
  65. jettask/webui/frontend/src/services/api.js +7 -5
  66. jettask/webui/frontend/src/utils/suppressWarnings.js +22 -0
  67. jettask/webui/frontend/src/utils/userPreferences.js +154 -0
  68. jettask/webui/multi_namespace_consumer.py +543 -0
  69. jettask/webui/pg_consumer.py +983 -246
  70. jettask/webui/static/dist/assets/index-7129cfe1.css +1 -0
  71. jettask/webui/static/dist/assets/index-8d1935cc.js +774 -0
  72. jettask/webui/static/dist/index.html +2 -2
  73. jettask/webui/task_center.py +216 -0
  74. jettask/webui/task_center_client.py +150 -0
  75. jettask/webui/unified_consumer_manager.py +193 -0
  76. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/METADATA +1 -1
  77. jettask-0.2.4.dist-info/RECORD +134 -0
  78. jettask/webui/pg_consumer_slow.py +0 -1099
  79. jettask/webui/pg_consumer_test.py +0 -678
  80. jettask/webui/static/dist/assets/index-823408e8.css +0 -1
  81. jettask/webui/static/dist/assets/index-9968b0b8.js +0 -543
  82. jettask/webui/test_pg_consumer_recovery.py +0 -547
  83. jettask/webui/test_recovery_simple.py +0 -492
  84. jettask/webui/test_self_recovery.py +0 -467
  85. jettask-0.2.1.dist-info/RECORD +0 -91
  86. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/WHEEL +0 -0
  87. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/entry_points.txt +0 -0
  88. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/licenses/LICENSE +0 -0
  89. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/top_level.txt +0 -0
jettask/core/message.py
@@ -0,0 +1,147 @@
+ """
+ Task message class - a fully standalone object for sending tasks.
+ Completely decoupled from task definitions; it can be used in any project.
+ """
+ from typing import Optional, Dict, Any, Tuple
+ from dataclasses import dataclass, field, asdict
+ import time
+
+
+ @dataclass
+ class TaskMessage:
+     """
+     Task message object
+
+     This is a fully standalone class that does not depend on any task definition.
+     It can be used on its own in projects that contain no executor code.
+
+     Usage example:
+         # Create a task message
+         msg = TaskMessage(
+             queue="order_processing",
+             args=(12345,),
+             kwargs={"customer_id": "C001", "amount": 99.99},
+             delay=5  # execute after a 5-second delay
+         )
+
+         # Create messages in batch
+         messages = [
+             TaskMessage(queue="email", kwargs={"to": "user1@example.com"}),
+             TaskMessage(queue="email", kwargs={"to": "user2@example.com"}),
+         ]
+
+         # Send
+         await app.send_tasks([msg])
+         await app.send_tasks(messages)
+     """
+
+     # Required parameter
+     queue: str  # queue name (required)
+
+     # Task arguments
+     args: Tuple = field(default_factory=tuple)
+     kwargs: Dict[str, Any] = field(default_factory=dict)
+
+     # Execution options
+     delay: Optional[int] = None  # delay before execution (seconds)
+     priority: Optional[int] = None  # priority (1 is highest; larger numbers mean lower priority)
+
+     # Scheduling
+     scheduled_task_id: Optional[int] = None  # scheduled task ID
+
+     # Routing information (for complex routing scenarios)
+     routing: Optional[Dict[str, Any]] = None
+
+     # Metadata
+     trigger_time: Optional[float] = None  # trigger time
+
+     def __post_init__(self):
+         """Post-initialization processing"""
+         # Set the trigger time automatically
+         if self.trigger_time is None:
+             self.trigger_time = time.time()
+
+     def to_dict(self) -> dict:
+         """
+         Convert to a dict (for serialization before sending to Redis)
+
+         Returns:
+             dict: message dict containing only the non-None fields
+         """
+         data = {
+             'queue': self.queue,
+             'args': self.args,
+             'kwargs': self.kwargs,
+             'trigger_time': self.trigger_time
+         }
+
+         # Add optional fields (only those that are not None)
+         optional_fields = [
+             'delay', 'priority', 'scheduled_task_id', 'routing'
+         ]
+
+         for field_name in optional_fields:
+             value = getattr(self, field_name)
+             if value is not None:
+                 # Skip empty lists/dicts as well
+                 if isinstance(value, (list, dict)) and not value:
+                     continue
+                 data[field_name] = value
+
+         return data
+
+     @classmethod
+     def from_dict(cls, data: dict) -> 'TaskMessage':
+         """
+         Create a TaskMessage instance from a dict
+
+         Args:
+             data: message dict
+
+         Returns:
+             TaskMessage: task message instance
+         """
+         # Extract the arguments accepted by the constructor
+         init_fields = {
+             'queue', 'args', 'kwargs',
+             'delay', 'priority', 'scheduled_task_id', 'routing', 'trigger_time'
+         }
+
+         init_data = {k: v for k, v in data.items() if k in init_fields}
+         return cls(**init_data)
+
+     def validate(self) -> bool:
+         """
+         Validate that the message is well-formed
+
+         Returns:
+             bool: whether the message is valid
+
+         Raises:
+             ValueError: if the message is invalid
+         """
+         if not self.queue:
+             raise ValueError("Queue name is required")
+
+         if self.delay and self.delay < 0:
+             raise ValueError(f"Delay must be non-negative, got {self.delay}")
+
+         if self.priority is not None and self.priority < 1:
+             raise ValueError(f"Priority must be positive (1 is highest), got {self.priority}")
+
+         return True
+
+     def __repr__(self) -> str:
+         """Friendly string representation"""
+         parts = [f"TaskMessage(queue='{self.queue}'"]
+
+         if self.args:
+             parts.append(f"args={self.args}")
+
+         if self.kwargs:
+             parts.append(f"kwargs={self.kwargs}")
+
+         if self.delay:
+             parts.append(f"delay={self.delay}s")
+
+         return ", ".join(parts) + ")"
jettask/core/offline_worker_recovery.py
@@ -33,34 +33,44 @@ class OfflineWorkerRecovery:
  Recover pending messages of offline workers for the specified queue
  """
  total_recovered = 0
-
+ logger.debug(f'Recovering pending messages of offline workers for the specified queue')
  try:
  # Get the current consumer name
  if not current_consumer_name and self.consumer_manager:
- current_consumer_name = self.consumer_manager.get_consumer_name(queue)
+ # For priority queues, use the base queue name to look up the consumer
+ base_queue = queue
+ if ':' in queue and queue.rsplit(':', 1)[-1].isdigit():
+ base_queue = queue.rsplit(':', 1)[0]
+
+ current_consumer_name = self.consumer_manager.get_consumer_name(base_queue)
+
+ # For priority queues, the queue suffix must be appended to the consumer name
+ if current_consumer_name and base_queue != queue:
+ priority_suffix = queue.rsplit(':', 1)[-1]
+ current_consumer_name = f"{current_consumer_name}:{priority_suffix}"

  if not current_consumer_name:
  logger.error(f"Cannot get current consumer name for queue {queue}")
  return 0

- # logger.info(f"Starting recovery for queue {queue} with consumer {current_consumer_name}")
+ logger.debug(f"Starting recovery for queue {queue} with consumer {current_consumer_name}")

  # Get all offline workers
  offline_workers = await self._find_offline_workers(queue)
- # logger.info(f'{offline_workers=}')
  if not offline_workers:
  logger.debug(f"No offline workers found for queue {queue}")
  return 0

- logger.info(f"Found {len(offline_workers)} offline workers for queue {queue}")
+ logger.debug(f"Found {len(offline_workers)} offline workers for queue {queue}")

  # Process each offline worker
  for worker_key, worker_data in offline_workers:
  if self._stop_recovery:
- logger.info("Stopping recovery due to shutdown signal")
+ logger.debug("Stopping recovery due to shutdown signal")
  break

- # logger.info(f"Processing offline worker: {worker_key}")
+ # logger.debug(f'Recovering offline-worker pending messages {offline_workers=}')
+ # logger.info(f"Processing offline worker: {worker_key} {worker_data=} {queue=}")
  recovered = await self._recover_worker_messages(
  queue=queue,
  worker_key=worker_key,
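
Both this hunk and the next one apply the same naming convention for priority queues; a small standalone sketch of that convention (the helper name is illustrative, not part of jettask):

    from typing import Optional, Tuple

    def split_priority_queue(queue: str) -> Tuple[str, Optional[str]]:
        """Return (base_queue, priority_suffix) for names like 'orders:5'; the suffix is None for plain queues."""
        if ':' in queue and queue.rsplit(':', 1)[-1].isdigit():
            base, suffix = queue.rsplit(':', 1)
            return base, suffix
        return queue, None

    print(split_priority_queue("orders:5"))  # ('orders', '5')  -> the ':5' suffix is appended to the consumer name
    print(split_priority_queue("orders"))    # ('orders', None) -> the consumer name is used unchanged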
@@ -113,20 +123,29 @@ class OfflineWorkerRecovery:
  value = v.decode('utf-8') if isinstance(v, bytes) else v
  decoded_worker_data[key] = value

- # logger.info(f'{worker_key=} {decoded_worker_data=}')
- # logger.info(f'{decoded_worker_data=}')
+ # logger.debug(f'{worker_key=} {decoded_worker_data=}')
+ # logger.debug(f'{decoded_worker_data=}')
  # Check whether the worker is offline and its messages have not been transferred
  is_alive = decoded_worker_data.get('is_alive', 'false').lower() == 'true'
  messages_transferred = decoded_worker_data.get('messages_transferred', 'false').lower() == 'true'
- # logger.info(f'{worker_key=} {is_alive=} {messages_transferred=} {not is_alive and not messages_transferred}')
+ # logger.debug(f'{worker_key=} {is_alive=} {messages_transferred=} {not is_alive and not messages_transferred}')
  # Find workers that are offline and whose messages have not been transferred
  if not is_alive and not messages_transferred:
  queues_str = decoded_worker_data.get('queues', '')
  worker_queues = queues_str.split(',') if queues_str else []

- # logger.info(f'{worker_queues=} {queue=}')
+ # logger.debug(f'{worker_queues=} {queue=}')
  # Check whether this worker is responsible for the current queue
- if queue in worker_queues:
+ # Priority-queue support: if queue has the form "base:priority", check whether the worker handles the base queue
+ queue_matched = False
+ if ':' in queue and queue.rsplit(':', 1)[-1].isdigit():
+ # This is a priority queue; extract the base queue name
+ base_queue = queue.rsplit(':', 1)[0]
+ queue_matched = base_queue in worker_queues
+ else:
+ # Regular queue
+ queue_matched = queue in worker_queues
+ if queue_matched:
  offline_workers.append((worker_key, decoded_worker_data))

  except Exception as e:
@@ -153,10 +172,7 @@ class OfflineWorkerRecovery:
  """
  Recover pending messages for a single worker

- Simplified logic:
- 1. Build the consumer name from the offline worker's consumer_id: {consumer_id}-{queue}
- 2. The default group name is just {prefixed_queue}
- 3. Fetch and transfer that consumer's pending messages directly
+ Read every group_info field from worker_data and recover the corresponding pending messages
  """
  total_claimed = 0

@@ -164,110 +180,123 @@
  # worker_data is already a decoded dict at this point
  consumer_id = worker_data.get('consumer_id')

- # Build the offline worker's consumer name
- offline_consumer = f"{consumer_id}-{queue}"
-
- logger.info(f"Recovering messages from offline worker: {offline_consumer}")
+ # Extract the group_info fields from worker_data
+ group_infos = []
+ import json
+ for key, value in worker_data.items():
+ if key.startswith('group_info:'):
+ try:
+ group_info = json.loads(value)
+ # Only handle groups that belong to the current queue
+ if group_info.get('queue') == queue:
+ group_infos.append(group_info)
+ logger.info(f"Found group info for queue {queue}: {group_info}")
+ except Exception as e:
+ logger.error(f"Error parsing group_info: {e}")

- # Check whether this is the same worker (avoid taking over our own messages)
- if current_consumer_name == offline_consumer or current_consumer_name.startswith(f"{offline_consumer}:"):
- logger.info(f"Skipping {offline_consumer} - same worker")
+ if not group_infos:
+ logger.debug(f"No group_info found for queue {queue} in worker {worker_key}")
+ # Even without group_info, mark the worker as processed to avoid rescanning it
+ await self.async_redis_client.hset(worker_key, 'messages_transferred', 'true')
  return 0
-
- # Use a distributed lock to prevent concurrent processing
- lock_key = f"{self.redis_prefix}:CLAIM:LOCK:{offline_consumer}"
- lock = AsyncLock(
- self.async_redis_client,
- lock_key,
- timeout=60,
- blocking=False  # do not block; skip instead
- )

- if not await lock.acquire():
- logger.info(f"Another process is claiming messages for {offline_consumer}")
- return 0
-
- try:
- # Get the Stream key
- prefixed_queue = self.queue_formatter(queue)
- logger.debug(f"Stream key: {prefixed_queue}, type: {type(prefixed_queue)}")
-
- # Get all consumer groups
- all_groups = await self._get_consumer_groups(prefixed_queue)
- logger.info(f"Found {len(all_groups)} groups for stream {prefixed_queue}")
-
- for group_name in all_groups:
- # Get all consumers in this group
+ # Before processing any group, mark this worker's messages as being transferred
+ # so that other processes do not handle it again
+ await self.async_redis_client.hset(worker_key, 'messages_transferred', 'true')
+ logger.info(f"Marked worker {worker_key} as messages_transferred=true")
+
+ # Process each group_info
+ for group_info in group_infos:
+ stream_key = group_info.get('stream_key')
+ group_name = group_info.get('group_name')
+ offline_consumer_name = group_info.get('consumer_name')
+ task_name = group_info.get('task_name')
+
+ if not all([stream_key, group_name, offline_consumer_name]):
+ logger.warning(f"Incomplete group_info: {group_info}")
+ continue
+
+ logger.info(f"Recovering task {task_name}: stream={stream_key}, group={group_name}, consumer={offline_consumer_name}")
+
+ # Skip our own consumer
+ if current_consumer_name == offline_consumer_name:
+ logger.debug(f"Skipping own consumer: {offline_consumer_name}")
+ continue
+
+ # Use a distributed lock
+ lock_key = f"{self.redis_prefix}:CLAIM:LOCK:{offline_consumer_name}:{group_name}"
+ lock = AsyncLock(
+ self.async_redis_client,
+ lock_key,
+ timeout=30,
+ blocking=False
+ )
+
+ if not await lock.acquire():
+ logger.debug(f"Lock busy for {offline_consumer_name}:{group_name}")
+ continue
+
  try:
- consumers_info = await self.async_redis_client.xinfo_consumers(prefixed_queue, group_name)
- logger.debug(f"Consumers in group {group_name}: {consumers_info}")
+ # Get the number of pending messages
+ pending_info = await self.async_redis_client.xpending(
+ stream_key, group_name
+ )

- for consumer_info in consumers_info:
- # The binary Redis client returns dicts whose keys are strings and whose values are bytes
- consumer_name = consumer_info.get('name', b'')
- if isinstance(consumer_name, bytes):
- consumer_name = consumer_name.decode('utf-8')
+ if pending_info and pending_info.get('pending', 0) > 0:
+ # Fetch the detailed pending message entries
+ detailed_pending = await self.async_redis_client.xpending_range(
+ stream_key, group_name,
+ min='-', max='+', count=100,
+ consumername=offline_consumer_name
+ )

- pending_count = consumer_info.get('pending', 0)
- logger.info(f'{offline_consumer=} {consumer_name=}')
- # Check whether this consumer belongs to the offline worker
- # Offline consumer names have the form {consumer_id}-{queue} or {consumer_id}-{queue}:{task_name}
- if consumer_name.startswith(f"{offline_consumer}"):
- logger.info(f'{consumer_info=}')
- if pending_count > 0:
- logger.info(f"Found {pending_count} pending messages for {consumer_name} in group {group_name}")
-
- # Determine the new consumer name
- # If the original consumer has a task suffix, the new consumer must keep the same suffix
- if ':' in consumer_name and ':' in group_name:
- # Extract the task suffix
- task_suffix = consumer_name.split(':', 1)[1]
- # The new consumer keeps the same task suffix
- if ':' in current_consumer_name:
- # If the current consumer already has a suffix, keep it
- new_consumer = current_consumer_name
- else:
- # Append the task suffix
- new_consumer = f"{current_consumer_name}:{task_suffix}"
- else:
- new_consumer = current_consumer_name
-
- logger.info(f"Claiming messages from {consumer_name} to {new_consumer}")
-
- # Transfer the pending messages
- claimed = await self._claim_messages(
- prefixed_queue, group_name,
- consumer_name, new_consumer
- )
+ if detailed_pending:
+ logger.info(f"Found {len(detailed_pending)} pending messages for {task_name}")
+
+ # Claim the messages in batch
+ message_ids = [msg['message_id'] for msg in detailed_pending]
+ claimed_messages = await self.async_redis_client.xclaim(
+ stream_key, group_name,
+ current_consumer_name,
+ min_idle_time=0,
+ message_ids=message_ids
+ )
+
+ if claimed_messages:
+ logger.info(f"Claimed {len(claimed_messages)} messages for task {task_name}")
+ total_claimed += len(claimed_messages)

- if claimed:
- total_claimed += len(claimed)
- logger.info(f"Successfully claimed {len(claimed)} messages")
-
- # Process the transferred messages
- if process_message_callback:
- for msg_id, msg_data in claimed:
- await process_message_callback(msg_id, msg_data, queue, consumer_id)
- elif event_queue:
- for msg_id, msg_data in claimed:
- await self._put_to_event_queue(
- msg_id, msg_data, queue,
- event_queue, new_consumer,
- group_name, consumer_name
- )
- except Exception as e:
- logger.error(f"Error processing group {group_name}: {e}")
-
- # Mark this worker's messages as transferred (even when there were none, to avoid reprocessing)
- await self.async_redis_client.hset(worker_key, 'messages_transferred', 'true')
- if total_claimed > 0:
- logger.info(f"Transferred total {total_claimed} messages from worker {consumer_id}")
- else:
- logger.info(f"No messages to transfer from worker {consumer_id}, marked as processed")
-
- finally:
- await lock.release()
-
+ # If an event_queue was provided, put the messages onto it
+ if event_queue:
+ for msg_id, msg_data in claimed_messages:
+ if isinstance(msg_id, bytes):
+ msg_id = msg_id.decode('utf-8')
+
+ # Parse the message payload
+ data_field = msg_data.get(b'data') or msg_data.get('data')
+ if data_field:
+ try:
+ import msgpack
+ parsed_data = msgpack.unpackb(data_field, raw=False)
+ # Attach the required metadata
+ parsed_data['_task_name'] = task_name
+ parsed_data['queue'] = queue
+
+ # Build the task item
+ task_item = {
+ 'queue': queue,
+ 'event_id': msg_id,
+ 'event_data': parsed_data,
+ 'consumer': current_consumer_name,
+ 'group_name': group_name
+ }
+
+ await event_queue.put(task_item)
+ except Exception as e:
+ logger.error(f"Error processing claimed message: {e}")
+ finally:
+ await lock.release()
+
  except Exception as e:
  logger.error(f"Error recovering messages: {e}")

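Condensed, the new recovery path reads the per-task group_info JSON entries stored on the worker hash and reclaims their pending stream entries with XPENDING/XCLAIM. Below is a minimal sketch of that flow using redis.asyncio directly, assuming the group_info field layout shown above (stream_key, group_name, consumer_name, queue); it omits the distributed lock and the messages_transferred flag that the real code also handles:

    import json
    import redis.asyncio as redis

    async def reclaim_for_worker(r: redis.Redis, worker_key: str, queue: str, new_consumer: str) -> int:
        """Claim pending stream entries that an offline worker left behind for `queue`."""
        claimed_total = 0
        worker = await r.hgetall(worker_key)            # bytes keys/values with a binary client
        for field, raw in worker.items():
            if not field.startswith(b'group_info:'):
                continue
            info = json.loads(raw)                      # JSON shape as read in the hunk above
            if info.get('queue') != queue:
                continue
            pending = await r.xpending_range(
                info['stream_key'], info['group_name'],
                min='-', max='+', count=100,
                consumername=info['consumer_name'],
            )
            ids = [p['message_id'] for p in pending]
            if ids:
                claimed = await r.xclaim(info['stream_key'], info['group_name'],
                                         new_consumer, min_idle_time=0, message_ids=ids)
                claimed_total += len(claimed)
        return claimed_total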
@@ -346,7 +375,7 @@ class OfflineWorkerRecovery:

  if message_ids:
  # Transfer the messages with XCLAIM
- logger.info(f"Claiming {len(message_ids)} messages from {old_consumer} to {new_consumer}")
+ logger.debug(f"Claiming {len(message_ids)} messages from {old_consumer} to {new_consumer}")

  claimed = await self.async_redis_client.xclaim(
  stream_key, group_name,
@@ -388,6 +417,44 @@ class OfflineWorkerRecovery:
  else:
  event_data = msg_data

+ # Extract the task_name from group_name
+ # group_name has the format: "jettask:QUEUE:{queue}:{task_name}"
+ task_name = None
+ if group_name and ':' in group_name:
+ parts = group_name.split(':')
+ # Use the last non-numeric part as the task_name
+ for i in range(len(parts) - 1, -1, -1):
+ part = parts[i]
+ # Skip priority numbers
+ if not part.isdigit() and part not in ['jettask', 'QUEUE', queue]:
+ task_name = part
+ logger.debug(f"Extracted task_name '{task_name}' from group_name '{group_name}'")
+ break
+
+ # If extraction from group_name failed, try the consumer name
+ if not task_name and ':' in consumer and ':' in group_name:
+ # The consumer may have the format "{consumer_id}:{task_name}"
+ consumer_parts = consumer.split(':')
+ if len(consumer_parts) > 1:
+ potential_task = consumer_parts[-1]
+ # Make sure it is not a priority number
+ if not potential_task.isdigit():
+ task_name = potential_task
+ logger.debug(f"Extracted task_name '{task_name}' from consumer '{consumer}'")
+
+ # If there is still no task_name, check whether event_data already carries one
+ if not task_name and '_task_name' in event_data:
+ task_name = event_data['_task_name']
+ logger.debug(f"Using existing _task_name from event_data: '{task_name}'")
+
+ # Make sure event_data carries a _task_name field
+ if task_name:
+ event_data['_task_name'] = task_name
+ logger.debug(f"Added _task_name '{task_name}' to recovered message")
+ else:
+ # If the task_name cannot be determined, log a warning
+ logger.warning(f"Could not determine task_name for recovered message. group_name='{group_name}', consumer='{consumer}'")
+
  # Build the event
  event = {
  'event_id': msg_id.decode() if isinstance(msg_id, bytes) else msg_id,
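
The extraction rule added here can be summarized in a few lines; a standalone sketch for the documented group_name layout "jettask:QUEUE:{queue}:{task_name}" with an optional trailing priority number (the helper name is illustrative, not part of jettask):

    from typing import Optional

    def extract_task_name(group_name: str, queue: str) -> Optional[str]:
        """Return the last segment that is neither a priority number nor a known prefix/queue token."""
        for part in reversed(group_name.split(':')):
            if not part.isdigit() and part not in ('jettask', 'QUEUE', queue):
                return part
        return None

    assert extract_task_name("jettask:QUEUE:orders:send_email", "orders") == "send_email"
    assert extract_task_name("jettask:QUEUE:orders:send_email:5", "orders") == "send_email"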