jettask 0.2.1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89):
  1. jettask/constants.py +213 -0
  2. jettask/core/app.py +525 -205
  3. jettask/core/cli.py +193 -185
  4. jettask/core/consumer_manager.py +126 -34
  5. jettask/core/context.py +3 -0
  6. jettask/core/enums.py +137 -0
  7. jettask/core/event_pool.py +501 -168
  8. jettask/core/message.py +147 -0
  9. jettask/core/offline_worker_recovery.py +181 -114
  10. jettask/core/task.py +10 -174
  11. jettask/core/task_batch.py +153 -0
  12. jettask/core/unified_manager_base.py +243 -0
  13. jettask/core/worker_scanner.py +54 -54
  14. jettask/executors/asyncio.py +184 -64
  15. jettask/webui/backend/config.py +51 -0
  16. jettask/webui/backend/data_access.py +2083 -92
  17. jettask/webui/backend/data_api.py +3294 -0
  18. jettask/webui/backend/dependencies.py +261 -0
  19. jettask/webui/backend/init_meta_db.py +158 -0
  20. jettask/webui/backend/main.py +1358 -69
  21. jettask/webui/backend/main_unified.py +78 -0
  22. jettask/webui/backend/main_v2.py +394 -0
  23. jettask/webui/backend/namespace_api.py +295 -0
  24. jettask/webui/backend/namespace_api_old.py +294 -0
  25. jettask/webui/backend/namespace_data_access.py +611 -0
  26. jettask/webui/backend/queue_backlog_api.py +727 -0
  27. jettask/webui/backend/queue_stats_v2.py +521 -0
  28. jettask/webui/backend/redis_monitor_api.py +476 -0
  29. jettask/webui/backend/unified_api_router.py +1601 -0
  30. jettask/webui/db_init.py +204 -32
  31. jettask/webui/frontend/package-lock.json +492 -1
  32. jettask/webui/frontend/package.json +4 -1
  33. jettask/webui/frontend/src/App.css +105 -7
  34. jettask/webui/frontend/src/App.jsx +49 -20
  35. jettask/webui/frontend/src/components/NamespaceSelector.jsx +166 -0
  36. jettask/webui/frontend/src/components/QueueBacklogChart.jsx +298 -0
  37. jettask/webui/frontend/src/components/QueueBacklogTrend.jsx +638 -0
  38. jettask/webui/frontend/src/components/QueueDetailsTable.css +65 -0
  39. jettask/webui/frontend/src/components/QueueDetailsTable.jsx +487 -0
  40. jettask/webui/frontend/src/components/QueueDetailsTableV2.jsx +465 -0
  41. jettask/webui/frontend/src/components/ScheduledTaskFilter.jsx +423 -0
  42. jettask/webui/frontend/src/components/TaskFilter.jsx +425 -0
  43. jettask/webui/frontend/src/components/TimeRangeSelector.css +21 -0
  44. jettask/webui/frontend/src/components/TimeRangeSelector.jsx +160 -0
  45. jettask/webui/frontend/src/components/layout/AppLayout.css +95 -0
  46. jettask/webui/frontend/src/components/layout/AppLayout.jsx +49 -0
  47. jettask/webui/frontend/src/components/layout/Header.css +34 -10
  48. jettask/webui/frontend/src/components/layout/Header.jsx +31 -23
  49. jettask/webui/frontend/src/components/layout/SideMenu.css +137 -0
  50. jettask/webui/frontend/src/components/layout/SideMenu.jsx +209 -0
  51. jettask/webui/frontend/src/components/layout/TabsNav.css +244 -0
  52. jettask/webui/frontend/src/components/layout/TabsNav.jsx +206 -0
  53. jettask/webui/frontend/src/components/layout/UserInfo.css +197 -0
  54. jettask/webui/frontend/src/components/layout/UserInfo.jsx +197 -0
  55. jettask/webui/frontend/src/contexts/NamespaceContext.jsx +72 -0
  56. jettask/webui/frontend/src/contexts/TabsContext.backup.jsx +245 -0
  57. jettask/webui/frontend/src/main.jsx +1 -0
  58. jettask/webui/frontend/src/pages/Alerts.jsx +684 -0
  59. jettask/webui/frontend/src/pages/Dashboard.jsx +1330 -0
  60. jettask/webui/frontend/src/pages/QueueDetail.jsx +1109 -10
  61. jettask/webui/frontend/src/pages/QueueMonitor.jsx +236 -115
  62. jettask/webui/frontend/src/pages/Queues.jsx +5 -1
  63. jettask/webui/frontend/src/pages/ScheduledTasks.jsx +809 -0
  64. jettask/webui/frontend/src/pages/Settings.jsx +800 -0
  65. jettask/webui/frontend/src/services/api.js +7 -5
  66. jettask/webui/frontend/src/utils/suppressWarnings.js +22 -0
  67. jettask/webui/frontend/src/utils/userPreferences.js +154 -0
  68. jettask/webui/multi_namespace_consumer.py +543 -0
  69. jettask/webui/pg_consumer.py +983 -246
  70. jettask/webui/static/dist/assets/index-7129cfe1.css +1 -0
  71. jettask/webui/static/dist/assets/index-8d1935cc.js +774 -0
  72. jettask/webui/static/dist/index.html +2 -2
  73. jettask/webui/task_center.py +216 -0
  74. jettask/webui/task_center_client.py +150 -0
  75. jettask/webui/unified_consumer_manager.py +193 -0
  76. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/METADATA +1 -1
  77. jettask-0.2.4.dist-info/RECORD +134 -0
  78. jettask/webui/pg_consumer_slow.py +0 -1099
  79. jettask/webui/pg_consumer_test.py +0 -678
  80. jettask/webui/static/dist/assets/index-823408e8.css +0 -1
  81. jettask/webui/static/dist/assets/index-9968b0b8.js +0 -543
  82. jettask/webui/test_pg_consumer_recovery.py +0 -547
  83. jettask/webui/test_recovery_simple.py +0 -492
  84. jettask/webui/test_self_recovery.py +0 -467
  85. jettask-0.2.1.dist-info/RECORD +0 -91
  86. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/WHEEL +0 -0
  87. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/entry_points.txt +0 -0
  88. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/licenses/LICENSE +0 -0
  89. {jettask-0.2.1.dist-info → jettask-0.2.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,521 @@
1
+ """
2
+ 队列统计API v2 - 支持消费者组和优先级队列
3
+ """
4
+ import asyncio
5
+ import logging
6
+ from typing import Dict, List, Optional, Any
7
+ from datetime import datetime, timezone
8
+ from collections import defaultdict
9
+ import redis.asyncio as redis
10
+ from sqlalchemy import text
11
+ from sqlalchemy.ext.asyncio import AsyncSession
12
+ from jettask.constants import is_internal_consumer
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
+ class QueueStatsV2:
18
+ """队列统计服务V2 - 支持消费者组详情"""
19
+
20
+ def __init__(self, redis_client: redis.Redis, pg_session: Optional[AsyncSession] = None, redis_prefix: str = "default"):
21
+ self.redis_client = redis_client
22
+ self.pg_session = pg_session
23
+ self.redis_prefix = redis_prefix
24
+
25
+ async def get_queue_stats_grouped(self, time_filter: Optional[Dict[str, Any]] = None) -> List[dict]:
26
+ """
27
+ 获取按队列分组的统计信息(前端使用的格式)
28
+ 返回格式:[{
29
+ 'queue_name': 'queue1',
30
+ 'consumer_groups': [...],
31
+ 'consumer_groups_count': 3,
32
+ 'total_length': 1000,
33
+ 'priority_queues': [0, 1, 2],
34
+ ...
35
+ }]
36
+ """
37
+ # 先获取扁平化的数据
38
+ flat_stats = await self.get_queue_stats_with_groups(time_filter)
39
+
40
+ # 转换为分组格式
41
+ return await self._transform_to_grouped_format(flat_stats)
42
+
43
+ async def get_queue_stats_with_groups(self, time_filter: Optional[Dict[str, Any]] = None) -> List[dict]:
44
+ """
45
+ 获取扁平化的队列统计信息 - 优化版本
46
+
47
+ Args:
48
+ time_filter: 时间筛选条件,如 {'start_time': datetime, 'end_time': datetime}
49
+
50
+ 返回格式: 每个优先级队列的每个消费者组作为一条记录
51
+ """
52
+ try:
53
+ # 1. 从Redis获取队列和消费者组结构(只获取基本信息,不获取队列长度)
54
+ all_stats = await self._get_queue_structure_from_redis()
55
+
56
+ if not all_stats:
57
+ return []
58
+
59
+ # 2. 从数据库批量补充统计信息(队列长度、成功率等)
60
+ await self._enrich_with_db_stats(all_stats, time_filter)
61
+
62
+ # 3. 按基础队列名和优先级排序
63
+ all_stats.sort(key=lambda x: (
64
+ x['base_queue_name'],
65
+ x['priority'] if x['priority'] is not None else 999,
66
+ x['group_name'] or ''
67
+ ))
68
+
69
+ return all_stats
70
+
71
+ except Exception as e:
72
+ logger.error(f"获取队列统计信息失败: {e}")
73
+ raise
74
+
75
    async def _get_queue_structure_from_redis(self) -> List[dict]:
        """Read the queue / consumer-group structure from Redis (no queue lengths).

        Scans ``{prefix}:QUEUE:*`` stream keys and emits one flat record per
        (queue, consumer group) pair; a queue with no groups still yields one
        record with ``group_name=None``. Counters such as ``queue_length``,
        ``success_count`` and ``visible_messages`` are left at zero here and
        filled in later by ``_enrich_with_db_stats``. Returns [] on any error.
        """
        try:
            # 1. Use KEYS directly to list all queue keys (performs better than
            #    SCAN while the number of queues stays small).
            queue_pattern = f"{self.redis_prefix}:QUEUE:*"
            queue_keys = await self.redis_client.keys(queue_pattern)
            if not queue_keys:
                return []

            # 2. Batch-fetch consumer-group info in one pipeline (queue lengths
            #    are intentionally not fetched here).
            pipe = self.redis_client.pipeline()
            for queue_key in queue_keys:
                pipe.xinfo_groups(queue_key)

            try:
                groups_results = await pipe.execute()
            except redis.ResponseError:
                # Some matched keys may not be streams; fall back to querying
                # each queue individually so one bad key cannot fail the batch.
                groups_results = []
                for queue_key in queue_keys:
                    try:
                        result = await self.redis_client.xinfo_groups(queue_key)
                        groups_results.append(result)
                    except redis.ResponseError:
                        groups_results.append([])
            # 3. Build the flat statistics records.
            all_stats = []
            for i, queue_key in enumerate(queue_keys):
                # Keys may be bytes or str depending on client decode settings.
                if isinstance(queue_key, bytes):
                    queue_key_str = queue_key.decode('utf-8')
                else:
                    queue_key_str = queue_key

                # Strip the key prefix to recover the queue name.
                queue_name = queue_key_str.replace(f"{self.redis_prefix}:QUEUE:", "")

                # Split into base queue name and numeric priority suffix.
                base_queue_name = self._get_base_queue_name(queue_name)
                priority = self._extract_priority(queue_name)

                # Group info for this queue; pipeline results are positional,
                # matching queue_keys order.
                groups_info = groups_results[i] if i < len(groups_results) else []

                if groups_info:
                    for group in groups_info:
                        # Field names may also come back as bytes.
                        group_name = group.get('name', group.get(b'name', ''))
                        if isinstance(group_name, bytes):
                            group_name = group_name.decode('utf-8')

                        # Skip internal (framework-owned) consumer groups.
                        if is_internal_consumer(group_name):
                            continue

                        consumers_count = group.get('consumers', group.get(b'consumers', 0))
                        pending_count = group.get('pending', group.get(b'pending', 0))

                        last_delivered_id = group.get('last-delivered-id', group.get(b'last-delivered-id', ''))
                        if isinstance(last_delivered_id, bytes):
                            last_delivered_id = last_delivered_id.decode('utf-8')

                        # Skip groups that have never consumed a message
                        # (last_delivered_id == '0-0').
                        if last_delivered_id == '0-0':
                            continue

                        # Derive the task name from the group name.
                        task_name = self._extract_task_name(group_name)

                        # One flat record per (queue, consumer group).
                        stat_record = {
                            'base_queue_name': base_queue_name,
                            'full_queue_name': queue_name,
                            'priority': priority,
                            'queue_length': 0,  # filled in from the database
                            'group_name': group_name,
                            'task_name': task_name,
                            'consumers': consumers_count,
                            'pending': pending_count,
                            'last_delivered_id': last_delivered_id,
                            'visible_messages': 0,  # computed during DB enrichment
                            'invisible_messages': pending_count,
                            'success_count': 0,  # filled in from the database
                            'failed_count': 0,  # filled in from the database
                            'success_rate': 0.0,
                            'processing_rate': 0.0,
                            'avg_execution_time': 0.0
                        }

                        all_stats.append(stat_record)
                else:
                    # Queue has no consumer groups; still record the queue itself.
                    stat_record = {
                        'base_queue_name': base_queue_name,
                        'full_queue_name': queue_name,
                        'priority': priority,
                        'queue_length': 0,  # filled in from the database
                        'group_name': None,
                        'task_name': None,
                        'consumers': 0,
                        'pending': 0,
                        'last_delivered_id': None,
                        'visible_messages': 0,
                        'invisible_messages': 0,
                        'success_count': 0,
                        'failed_count': 0,
                        'success_rate': 0.0,
                        'processing_rate': 0.0,
                        'avg_execution_time': 0.0
                    }
                    all_stats.append(stat_record)

            return all_stats

        except Exception as e:
            logger.error(f"从Redis获取队列结构失败: {e}")
            import traceback
            logger.error(traceback.format_exc())
            return []
194
+
195
    async def _enrich_with_db_stats(self, stats: List[dict], time_filter: Optional[Dict[str, Any]] = None):
        """Back-fill the Redis-derived records with aggregate counters from PostgreSQL.

        Mutates *stats* in place: sets queue_length, success/failed counts,
        success_rate, processing_rate, avg_execution_time and visible_messages
        for every record that has a matching (queue, consumer_group) row in the
        aggregation query. No-op when there is no DB session or no records;
        errors are logged and swallowed (best effort).

        Args:
            stats: flat records produced by ``_get_queue_structure_from_redis``.
            time_filter: optional {'start_time': ..., 'end_time': ...} window
                applied to ``tasks.created_at``.
        """
        if not self.pg_session or not stats:
            return

        try:
            # Collect the distinct queues and consumer groups to aggregate over.
            unique_queues = list(set(stat['full_queue_name'] for stat in stats))
            unique_groups = list(set(stat['group_name'] for stat in stats if stat['group_name']))

            if not unique_queues:
                return

            # Build the optional time-window clause and bind parameters.
            time_clause = ""
            params = {
                'namespace': self.redis_prefix,
                'queues': unique_queues
            }

            # NOTE(review): 'groups' is bound here but never referenced in the
            # SQL below — presumably a leftover from an earlier query shape;
            # confirm before removing.
            if unique_groups:
                params['groups'] = unique_groups

            if time_filter:
                if 'start_time' in time_filter:
                    time_clause += " AND t.created_at >= :start_time"
                    params['start_time'] = time_filter['start_time']
                if 'end_time' in time_filter:
                    time_clause += " AND t.created_at <= :end_time"
                    params['end_time'] = time_filter['end_time']

            # Aggregation: per-(queue, consumer_group) stats plus per-queue
            # totals, UNION'ed with queues that only have unprocessed tasks.
            query = text(f"""
                WITH base_data AS (
                    -- 获取所有任务及其运行状态
                    SELECT
                        t.queue,
                        t.stream_id,
                        tr.consumer_group,
                        tr.status,
                        tr.duration,
                        tr.end_time,
                        CASE WHEN tr.stream_id IS NULL THEN 1 ELSE 0 END as is_unprocessed
                    FROM tasks t
                    LEFT JOIN task_runs tr ON t.stream_id = tr.stream_id
                    WHERE t.namespace = :namespace
                    AND t.queue = ANY(:queues) {time_clause}
                ),
                group_stats AS (
                    -- 按队列和消费者组分组统计
                    SELECT
                        queue,
                        consumer_group,
                        COUNT(DISTINCT stream_id) as total_tasks,
                        SUM(is_unprocessed) as unprocessed_count,
                        COUNT(DISTINCT CASE WHEN status IS NOT NULL THEN stream_id END) as processed_tasks,
                        COUNT(DISTINCT CASE WHEN status = 'pending' THEN stream_id END) as pending_in_runs,
                        COUNT(DISTINCT CASE WHEN status = 'success' THEN stream_id END) as success_count,
                        COUNT(DISTINCT CASE WHEN status = 'error' THEN stream_id END) as error_count,
                        AVG(CASE WHEN status = 'success' AND duration IS NOT NULL THEN duration END) as avg_execution_time,
                        COUNT(DISTINCT CASE WHEN status = 'success' AND end_time >= NOW() - INTERVAL '1 minute' THEN stream_id END) as recent_completed
                    FROM base_data
                    WHERE consumer_group IS NOT NULL
                    GROUP BY queue, consumer_group
                ),
                queue_totals AS (
                    -- 获取每个队列的总体统计(包括未处理的任务)
                    SELECT
                        queue,
                        COUNT(DISTINCT stream_id) as queue_total_tasks,
                        SUM(is_unprocessed) as queue_unprocessed_tasks
                    FROM base_data
                    GROUP BY queue
                )
                -- 合并结果
                SELECT
                    gs.queue,
                    gs.consumer_group,
                    COALESCE(qt.queue_total_tasks, gs.total_tasks) as total_tasks,
                    gs.processed_tasks,
                    COALESCE(qt.queue_unprocessed_tasks, 0) as unprocessed_tasks,
                    gs.pending_in_runs,
                    gs.success_count,
                    gs.error_count,
                    gs.avg_execution_time,
                    gs.recent_completed
                FROM group_stats gs
                LEFT JOIN queue_totals qt ON gs.queue = qt.queue

                UNION ALL

                -- 对于没有消费者组的队列,返回未处理任务的统计
                SELECT
                    queue,
                    NULL as consumer_group,
                    queue_total_tasks as total_tasks,
                    0 as processed_tasks,
                    queue_unprocessed_tasks as unprocessed_tasks,
                    0 as pending_in_runs,
                    0 as success_count,
                    0 as error_count,
                    NULL as avg_execution_time,
                    0 as recent_completed
                FROM queue_totals
                WHERE queue_unprocessed_tasks > 0
            """)

            result = await self.pg_session.execute(query, params)
            # Index rows by "queue|group" ("queue|NONE" for group-less rows).
            db_stats = {}
            for row in result:
                if row.consumer_group:
                    key = f"{row.queue}|{row.consumer_group}"
                else:
                    key = f"{row.queue}|NONE"
                db_stats[key] = row

            # Merge the DB aggregates back into the Redis-derived records.
            for stat in stats:
                if stat['group_name']:
                    key = f"{stat['full_queue_name']}|{stat['group_name']}"
                else:
                    key = f"{stat['full_queue_name']}|NONE"

                if key in db_stats:
                    row = db_stats[key]
                    stat['queue_length'] = row.total_tasks or 0
                    stat['success_count'] = row.success_count or 0
                    stat['failed_count'] = row.error_count or 0
                    stat['avg_execution_time'] = float(row.avg_execution_time or 0)
                    stat['processing_rate'] = row.recent_completed or 0

                    # Success rate over terminal runs only (success + error).
                    total = stat['success_count'] + stat['failed_count']
                    if total > 0:
                        stat['success_rate'] = round((stat['success_count'] / total) * 100, 2)

                    # Visible messages = tasks never claimed by a worker plus
                    # runs marked pending in the DB beyond what Redis still
                    # holds as pending (i.e. done but unacknowledged).
                    unprocessed_tasks = row.unprocessed_tasks or 0  # in tasks but not in task_runs
                    pending_in_runs = row.pending_in_runs or 0  # task_runs rows with status='pending'
                    redis_pending = stat['pending']  # pending count reported by Redis

                    # Unclaimed work + completed-but-unacked backlog.
                    stat['visible_messages'] = unprocessed_tasks + max(0, pending_in_runs - redis_pending)

        except Exception as e:
            logger.error(f"从数据库补充统计信息失败: {e}")
            import traceback
            logger.error(traceback.format_exc())
345
+
346
+
347
+ async def _get_active_workers_for_queue(self, base_queue_name: str) -> int:
348
+ """获取指定基础队列的活跃Worker数量"""
349
+ try:
350
+ import time
351
+ active_workers = 0
352
+
353
+ # 扫描所有worker键
354
+ worker_pattern = f"{self.redis_prefix}:WORKER:*"
355
+ worker_keys = await self.redis_client.keys(worker_pattern)
356
+
357
+ for worker_key in worker_keys:
358
+ try:
359
+ worker_info = await self.redis_client.hgetall(worker_key)
360
+ if worker_info:
361
+ # 检查心跳是否在60秒内
362
+ last_heartbeat = worker_info.get(b'last_heartbeat', worker_info.get('last_heartbeat'))
363
+ if last_heartbeat:
364
+ if isinstance(last_heartbeat, bytes):
365
+ last_heartbeat = last_heartbeat.decode('utf-8')
366
+ heartbeat_time = float(last_heartbeat)
367
+ if time.time() - heartbeat_time < 60: # 60秒内有心跳
368
+ # 检查队列列表是否包含基础队列名
369
+ queues = worker_info.get(b'queues', worker_info.get('queues', ''))
370
+ if isinstance(queues, bytes):
371
+ queues = queues.decode('utf-8')
372
+ if base_queue_name in queues:
373
+ active_workers += 1
374
+ except Exception:
375
+ continue
376
+
377
+ return active_workers
378
+ except Exception as e:
379
+ logger.warning(f"获取活跃workers失败: {e}")
380
+ return 0
381
+
382
+ async def _transform_to_grouped_format(self, flat_stats: List[dict]) -> List[dict]:
383
+ """将扁平化的统计数据转换为按队列分组的嵌套格式"""
384
+ grouped_data = {}
385
+
386
+ for stat in flat_stats:
387
+ base_queue_name = stat['base_queue_name']
388
+
389
+ if base_queue_name not in grouped_data:
390
+ grouped_data[base_queue_name] = {
391
+ 'queue_name': base_queue_name,
392
+ 'consumer_groups': [],
393
+ 'consumer_groups_count': 0,
394
+ 'total_length': 0,
395
+ 'priority_queues': set(), # 用于收集所有优先级队列
396
+ 'total_visible': 0,
397
+ 'total_invisible': 0,
398
+ 'total_success': 0,
399
+ 'total_failed': 0,
400
+ 'active_workers': 0 # 添加活跃Workers字段
401
+ }
402
+
403
+ # 添加消费者组信息
404
+ if stat['group_name']:
405
+ consumer_group = {
406
+ 'group_name': stat['group_name'],
407
+ 'task_name': stat['task_name'],
408
+ 'queue_name': stat['full_queue_name'], # 包含优先级的完整队列名
409
+ 'priority': stat['priority'],
410
+ 'queue_length': stat['queue_length'],
411
+ 'consumers': stat['consumers'],
412
+ 'pending': stat['pending'],
413
+ 'last_delivered_id': stat['last_delivered_id'],
414
+ 'visible_messages': stat['visible_messages'],
415
+ 'invisible_messages': stat['invisible_messages'],
416
+ 'success_count': stat['success_count'],
417
+ 'failed_count': stat['failed_count'],
418
+ 'success_rate': stat['success_rate'],
419
+ 'processing_rate': stat['processing_rate'],
420
+ 'avg_execution_time': stat['avg_execution_time'],
421
+ 'unique_key': f"{stat['group_name']}_{stat['full_queue_name']}" # 唯一键
422
+ }
423
+ grouped_data[base_queue_name]['consumer_groups'].append(consumer_group)
424
+
425
+ # 累加统计
426
+ grouped_data[base_queue_name]['total_visible'] += stat['visible_messages']
427
+ grouped_data[base_queue_name]['total_invisible'] += stat['invisible_messages']
428
+ grouped_data[base_queue_name]['total_success'] += stat['success_count']
429
+ grouped_data[base_queue_name]['total_failed'] += stat['failed_count']
430
+
431
+ # 收集优先级队列信息(现在priority不会是None,0表示无优先级)
432
+ grouped_data[base_queue_name]['priority_queues'].add(stat['priority'])
433
+
434
+ # 更新队列长度(累加所有优先级队列的长度)
435
+ # 使用字典记录已计算的队列,避免重复
436
+ if base_queue_name not in grouped_data:
437
+ grouped_data[base_queue_name]['calculated_queues'] = set()
438
+
439
+ full_queue = stat['full_queue_name']
440
+ if full_queue not in grouped_data[base_queue_name].get('calculated_queues', set()):
441
+ grouped_data[base_queue_name]['total_length'] += stat['queue_length']
442
+ grouped_data[base_queue_name].setdefault('calculated_queues', set()).add(full_queue)
443
+
444
+ # 转换为列表并处理优先级队列
445
+ result = []
446
+ for queue_data in grouped_data.values():
447
+ # 清理临时字段
448
+ queue_data.pop('calculated_queues', None)
449
+
450
+ # 转换优先级队列集合为排序列表
451
+ queue_data['priority_queues'] = sorted(list(queue_data['priority_queues']))
452
+ queue_data['consumer_groups_count'] = len(queue_data['consumer_groups'])
453
+
454
+ # 获取该基础队列的活跃Workers数
455
+ queue_data['active_workers'] = await self._get_active_workers_for_queue(queue_data['queue_name'])
456
+
457
+ # 计算总体成功率
458
+ total_tasks = queue_data['total_success'] + queue_data['total_failed']
459
+ if total_tasks > 0:
460
+ queue_data['overall_success_rate'] = round(
461
+ (queue_data['total_success'] / total_tasks) * 100, 2
462
+ )
463
+ else:
464
+ queue_data['overall_success_rate'] = 0.0
465
+
466
+ result.append(queue_data)
467
+
468
+ # 按队列名排序
469
+ result.sort(key=lambda x: x['queue_name'])
470
+
471
+ return result
472
+
473
+
474
+ def _get_base_queue_name(self, queue_name: str) -> str:
475
+ """提取基础队列名(去除优先级后缀)"""
476
+ # 检查是否包含优先级后缀(格式: queue_name:priority)
477
+ if ':' in queue_name:
478
+ parts = queue_name.rsplit(':', 1)
479
+ # 检查最后一部分是否是数字(优先级)
480
+ if parts[-1].isdigit():
481
+ return parts[0]
482
+ return queue_name
483
+
484
+ def _extract_priority(self, queue_name: str) -> int:
485
+ """从队列名中提取优先级,无优先级后缀的返回0"""
486
+ if ':' in queue_name:
487
+ parts = queue_name.rsplit(':', 1)
488
+ if parts[-1].isdigit():
489
+ return int(parts[-1])
490
+ return 0 # 没有优先级后缀的队列默认优先级为0
491
+
492
+ def _extract_task_name(self, group_name: str) -> str:
493
+ """从消费者组名中提取任务名"""
494
+ # 消费者组名格式: prefix:QUEUE:queue_name:task_name
495
+ # 或: prefix:QUEUE:queue_name(默认组)
496
+ if ':' in group_name:
497
+ parts = group_name.split(':')
498
+ # 查找QUEUE后的部分
499
+ try:
500
+ queue_idx = parts.index('QUEUE')
501
+ # task_name是QUEUE后的第二个部分(如果存在)
502
+ if len(parts) > queue_idx + 2:
503
+ # 返回最后一个部分作为task_name
504
+ return parts[-1]
505
+ except ValueError:
506
+ pass
507
+ return 'default'
508
+
509
+ def _is_default_idle_group(self, group_name: str, base_queue_name: str) -> bool:
510
+ """检查是否是空闲的默认消费者组"""
511
+ # 默认组的格式是: prefix:QUEUE:queue_name(没有task后缀)
512
+ if ':QUEUE:' in group_name:
513
+ parts = group_name.split(':QUEUE:')
514
+ if len(parts) == 2:
515
+ queue_part = parts[1]
516
+ # 如果queue_part就是base_queue_name或带优先级版本,说明是默认组
517
+ if queue_part == base_queue_name or queue_part.startswith(f"{base_queue_name}:"):
518
+ # 检查是否没有task后缀
519
+ if ':' not in queue_part.replace(f"{base_queue_name}:", ""):
520
+ return True
521
+ return False