jettask 0.2.20__py3-none-any.whl → 0.2.24__py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
Files changed (110)
  1. jettask/__init__.py +4 -0
  2. jettask/cli.py +12 -8
  3. jettask/config/lua_scripts.py +37 -0
  4. jettask/config/nacos_config.py +1 -1
  5. jettask/core/app.py +313 -340
  6. jettask/core/container.py +4 -4
  7. jettask/{persistence → core}/namespace.py +93 -27
  8. jettask/core/task.py +16 -9
  9. jettask/core/unified_manager_base.py +136 -26
  10. jettask/db/__init__.py +67 -0
  11. jettask/db/base.py +137 -0
  12. jettask/{utils/db_connector.py → db/connector.py} +130 -26
  13. jettask/db/models/__init__.py +16 -0
  14. jettask/db/models/scheduled_task.py +196 -0
  15. jettask/db/models/task.py +77 -0
  16. jettask/db/models/task_run.py +85 -0
  17. jettask/executor/__init__.py +0 -15
  18. jettask/executor/core.py +76 -31
  19. jettask/executor/process_entry.py +29 -114
  20. jettask/executor/task_executor.py +4 -0
  21. jettask/messaging/event_pool.py +928 -685
  22. jettask/messaging/scanner.py +30 -0
  23. jettask/persistence/__init__.py +28 -103
  24. jettask/persistence/buffer.py +170 -0
  25. jettask/persistence/consumer.py +330 -249
  26. jettask/persistence/manager.py +304 -0
  27. jettask/persistence/persistence.py +391 -0
  28. jettask/scheduler/__init__.py +15 -3
  29. jettask/scheduler/{task_crud.py → database.py} +61 -57
  30. jettask/scheduler/loader.py +2 -2
  31. jettask/scheduler/{scheduler_coordinator.py → manager.py} +23 -6
  32. jettask/scheduler/models.py +14 -10
  33. jettask/scheduler/schedule.py +166 -0
  34. jettask/scheduler/scheduler.py +12 -11
  35. jettask/schemas/__init__.py +50 -1
  36. jettask/schemas/backlog.py +43 -6
  37. jettask/schemas/namespace.py +70 -19
  38. jettask/schemas/queue.py +19 -3
  39. jettask/schemas/responses.py +493 -0
  40. jettask/task/__init__.py +0 -2
  41. jettask/task/router.py +3 -0
  42. jettask/test_connection_monitor.py +1 -1
  43. jettask/utils/__init__.py +7 -5
  44. jettask/utils/db_init.py +8 -4
  45. jettask/utils/namespace_dep.py +167 -0
  46. jettask/utils/queue_matcher.py +186 -0
  47. jettask/utils/rate_limit/concurrency_limiter.py +7 -1
  48. jettask/utils/stream_backlog.py +1 -1
  49. jettask/webui/__init__.py +0 -1
  50. jettask/webui/api/__init__.py +4 -4
  51. jettask/webui/api/alerts.py +806 -71
  52. jettask/webui/api/example_refactored.py +400 -0
  53. jettask/webui/api/namespaces.py +390 -45
  54. jettask/webui/api/overview.py +300 -54
  55. jettask/webui/api/queues.py +971 -267
  56. jettask/webui/api/scheduled.py +1249 -56
  57. jettask/webui/api/settings.py +129 -7
  58. jettask/webui/api/workers.py +442 -0
  59. jettask/webui/app.py +46 -2329
  60. jettask/webui/middleware/__init__.py +6 -0
  61. jettask/webui/middleware/namespace_middleware.py +135 -0
  62. jettask/webui/services/__init__.py +146 -0
  63. jettask/webui/services/heartbeat_service.py +251 -0
  64. jettask/webui/services/overview_service.py +60 -51
  65. jettask/webui/services/queue_monitor_service.py +426 -0
  66. jettask/webui/services/redis_monitor_service.py +87 -0
  67. jettask/webui/services/settings_service.py +174 -111
  68. jettask/webui/services/task_monitor_service.py +222 -0
  69. jettask/webui/services/timeline_pg_service.py +452 -0
  70. jettask/webui/services/timeline_service.py +189 -0
  71. jettask/webui/services/worker_monitor_service.py +467 -0
  72. jettask/webui/utils/__init__.py +11 -0
  73. jettask/webui/utils/time_utils.py +122 -0
  74. jettask/worker/lifecycle.py +8 -2
  75. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/METADATA +1 -1
  76. jettask-0.2.24.dist-info/RECORD +142 -0
  77. jettask/executor/executor.py +0 -338
  78. jettask/persistence/backlog_monitor.py +0 -567
  79. jettask/persistence/base.py +0 -2334
  80. jettask/persistence/db_manager.py +0 -516
  81. jettask/persistence/maintenance.py +0 -81
  82. jettask/persistence/message_consumer.py +0 -259
  83. jettask/persistence/models.py +0 -49
  84. jettask/persistence/offline_recovery.py +0 -196
  85. jettask/persistence/queue_discovery.py +0 -215
  86. jettask/persistence/task_persistence.py +0 -218
  87. jettask/persistence/task_updater.py +0 -583
  88. jettask/scheduler/add_execution_count.sql +0 -11
  89. jettask/scheduler/add_priority_field.sql +0 -26
  90. jettask/scheduler/add_scheduler_id.sql +0 -25
  91. jettask/scheduler/add_scheduler_id_index.sql +0 -10
  92. jettask/scheduler/make_scheduler_id_required.sql +0 -28
  93. jettask/scheduler/migrate_interval_seconds.sql +0 -9
  94. jettask/scheduler/performance_optimization.sql +0 -45
  95. jettask/scheduler/run_scheduler.py +0 -186
  96. jettask/scheduler/schema.sql +0 -84
  97. jettask/task/task_executor.py +0 -318
  98. jettask/webui/api/analytics.py +0 -323
  99. jettask/webui/config.py +0 -90
  100. jettask/webui/models/__init__.py +0 -3
  101. jettask/webui/models/namespace.py +0 -63
  102. jettask/webui/namespace_manager/__init__.py +0 -10
  103. jettask/webui/namespace_manager/multi.py +0 -593
  104. jettask/webui/namespace_manager/unified.py +0 -193
  105. jettask/webui/run.py +0 -46
  106. jettask-0.2.20.dist-info/RECORD +0 -145
  107. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/WHEEL +0 -0
  108. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/entry_points.txt +0 -0
  109. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/licenses/LICENSE +0 -0
  110. {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/top_level.txt +0 -0
jettask/webui/services/overview_service.py

@@ -11,7 +11,7 @@ from sqlalchemy import text
 from sqlalchemy.ext.asyncio import AsyncSession
 
 from jettask.schemas import TimeRangeQuery
-from jettask.persistence.db_manager import get_db_manager
+from jettask.db.connector import get_async_redis_client, get_pg_engine_and_factory
 
 logger = logging.getLogger(__name__)
 
@@ -36,14 +36,18 @@ class OverviewService:
     def get_root_info() -> Dict[str, Any]:
         """
         Get root path information
-
+
         Returns:
             Basic API information
         """
         return {
-            "message": "JetTask Monitor API",
-            "version": "1.0.0",
-            "timestamp": datetime.now(timezone.utc).isoformat()
+            "success": True,
+            "data": {
+                "service": "JetTask WebUI API",
+                "version": "v1",
+                "status": "running",
+                "timestamp": datetime.now(timezone.utc).isoformat()
+            }
         }
 
     @staticmethod
@@ -63,16 +67,16 @@ class OverviewService:
     async def get_system_stats(namespace: str) -> Dict[str, Any]:
         """
         Get system statistics for the given namespace
-
+
         Args:
             namespace: namespace name
-
+
         Returns:
             System statistics
         """
-        db_manager = get_db_manager()
-        pool = await db_manager.get_pool(namespace)
-        redis_client = await pool.get_redis_client()
+        import os
+        redis_url = os.environ.get('JETTASK_REDIS_URL', 'redis://localhost:6379/0')
+        redis_client = get_async_redis_client(redis_url, decode_responses=True)
 
         try:
             # Count the different key types
@@ -136,34 +140,35 @@ class OverviewService:
     ) -> Dict[str, Any]:
         """
         Get dashboard statistics
-
+
         Args:
             namespace: namespace name
             time_range: time range
             queues: comma-separated list of queue names
-
+
         Returns:
             Dashboard statistics
         """
-        db_manager = get_db_manager()
-        pool = await db_manager.get_pool(namespace)
-
-        # If PostgreSQL is not configured, return empty data
-        # Check whether PostgreSQL is configured
-        if not pool.config.has_postgres():
+        import os
+
+        # Read the PostgreSQL configuration
+        pg_url = os.environ.get('JETTASK_PG_URL')
+        if not pg_url:
             return {
                 "success": True,
                 "data": _get_empty_dashboard_stats()
             }
-
+
         # Compute the time range
         end_time = datetime.now(timezone.utc)
         start_time = _parse_time_range(time_range, end_time)
-
+
         # Build the queue filter
         queue_filter, queue_list, queue_params = _build_queue_filter_and_params(queues)
-
-        async with pool.get_sa_session() as session:
+
+        # Get a database session
+        _, session_factory = get_pg_engine_and_factory(pg_url)
+        async with session_factory() as session:
             # Fetch the statistics
             stats_data = await _get_task_statistics(
                 session, namespace, start_time, end_time,
@@ -232,31 +237,31 @@ class OverviewService:
     ) -> Dict[str, Any]:
         """
         Get unified statistics for the overview page
-
+
         Args:
             namespace: namespace name
             query: time-range query parameters
-
+
         Returns:
             Unified time-series data
         """
-        db_manager = get_db_manager()
-        pool = await db_manager.get_pool(namespace)
-
-        # If PostgreSQL is not configured, return empty data
-        # Check whether PostgreSQL is configured
-        if not pool.config.has_postgres():
+        import os
+
+        # Read the PostgreSQL configuration
+        pg_url = os.environ.get('JETTASK_PG_URL')
+        if not pg_url:
             return _get_empty_overview_stats()
-
+
         # Parse the time range
         time_range_result = _parse_time_range_query(query)
-
+
         # Build the queue filter
-        # Use the queues field of TimeRangeQuery (supports a list of queues)
         queue_list = query.queues if hasattr(query, 'queues') and query.queues else None
         queue_filter, _, queue_params = _build_queue_filter_and_params(queue_list)
-
-        async with pool.get_sa_session() as session:
+
+        # Get a database session
+        _, session_factory = get_pg_engine_and_factory(pg_url)
+        async with session_factory() as session:
             # Run the unified query
             result = await _execute_overview_query(
                 session, namespace, time_range_result,
@@ -531,17 +536,19 @@ async def _get_task_distribution(session, namespace, start_time, end_time, queue
 
 async def _get_top_backlog_queues(namespace, limit, time_range, queues):
     """Get the top N queues with the largest backlog"""
-    db_manager = get_db_manager()
-    pool = await db_manager.get_pool(namespace)
-
-    # Check whether PostgreSQL is configured
-    if not pool.config.has_postgres():
+    import os
+
+    # Read the PostgreSQL configuration
+    pg_url = os.environ.get('JETTASK_PG_URL')
+    if not pg_url:
         return {"success": True, "data": []}
-
+
    end_time = datetime.now(timezone.utc)
    start_time = _parse_time_range(time_range, end_time)
-
-    async with pool.get_sa_session() as session:
+
+    # Get a database session
+    _, session_factory = get_pg_engine_and_factory(pg_url)
+    async with session_factory() as session:
         queue_list = []
         if queues:
             # If it is a string, split on commas (backwards compatible)
@@ -664,17 +671,19 @@ async def _get_top_backlog_from_tasks(session, namespace, limit):
 
 async def _get_top_error_queues(namespace, limit, time_range, queues):
     """Get the top N queues with the highest error rate"""
-    db_manager = get_db_manager()
-    pool = await db_manager.get_pool(namespace)
-
-    # Check whether PostgreSQL is configured
-    if not pool.config.has_postgres():
+    import os
+
+    # Read the PostgreSQL configuration
+    pg_url = os.environ.get('JETTASK_PG_URL')
+    if not pg_url:
         return {"success": True, "data": []}
-
+
    end_time = datetime.now(timezone.utc)
    start_time = _parse_time_range(time_range, end_time)
-
-    async with pool.get_sa_session() as session:
+
+    # Get a database session
+    _, session_factory = get_pg_engine_and_factory(pg_url)
+    async with session_factory() as session:
         queue_list = []
         if queues:
             queue_list = [q.strip() for q in queues.split(',') if q.strip()]
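
The hunks above illustrate the pattern repeated throughout this release: the per-namespace db_manager / connection-pool lookup is replaced by the helpers from the new jettask/db/connector.py module, configured via the JETTASK_REDIS_URL and JETTASK_PG_URL environment variables. A minimal sketch of that access pattern, assuming the connector helpers cache clients and engines per URL (only their call signatures are visible in this diff); the Redis key pattern and SQL below are illustrative only:

import os

from sqlalchemy import text
from jettask.db.connector import get_async_redis_client, get_pg_engine_and_factory


async def sample_overview_calls():
    # Redis access, mirroring get_system_stats() above
    redis_url = os.environ.get('JETTASK_REDIS_URL', 'redis://localhost:6379/0')
    redis_client = get_async_redis_client(redis_url, decode_responses=True)
    queue_count = len(await redis_client.keys('jettask:QUEUE:*'))  # illustrative key pattern

    # PostgreSQL access, mirroring the dashboard/overview helpers above
    pg_url = os.environ.get('JETTASK_PG_URL')
    if not pg_url:
        return {"queues": queue_count, "tasks": None}  # no PostgreSQL configured

    _, session_factory = get_pg_engine_and_factory(pg_url)
    async with session_factory() as session:
        result = await session.execute(text("SELECT count(*) FROM tasks"))  # illustrative query
        return {"queues": queue_count, "tasks": result.scalar()}

In other words, callers that previously went through pool.get_sa_session() now obtain a session factory from get_pg_engine_and_factory() and open sessions themselves.
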
jettask/webui/services/queue_monitor_service.py (new file)

@@ -0,0 +1,426 @@
+"""
+Queue monitoring service
+
+Provides queue-related monitoring functionality
+"""
+import logging
+import time
+import json
+from typing import Dict, Any, List, Optional
+from datetime import datetime
+
+from .redis_monitor_service import RedisMonitorService
+
+logger = logging.getLogger(__name__)
+
+
+class QueueMonitorService:
+    """Queue monitoring service class"""
+
+    def __init__(self, redis_service: RedisMonitorService):
+        """
+        Initialize the queue monitoring service
+
+        Args:
+            redis_service: Redis monitoring base service instance
+        """
+        self.redis_service = redis_service
+
+    @property
+    def redis(self):
+        """Get the Redis client"""
+        return self.redis_service.redis
+
+    @property
+    def redis_prefix(self) -> str:
+        """Get the Redis prefix"""
+        return self.redis_service.redis_prefix
+
+    async def get_all_queues(self) -> List[str]:
+        """
+        Get all queue names
+
+        Returns:
+            List of queue names (sorted)
+        """
+        try:
+            # Check whether the cache is still valid
+            current_time = time.time()
+            if (self.redis_service._queues_cache is not None and
+                    (current_time - self.redis_service._queues_cache_time) < self.redis_service._queues_cache_ttl):
+                logger.debug("Returning cached queue list")
+                return self.redis_service._queues_cache
+
+            # Prefer the global queue set
+            global_queues_key = f'{self.redis_prefix}:global:queues'
+            queues = await self.redis.smembers(global_queues_key)
+
+            if queues:
+                # If the global queue set exists, use it directly
+                result = sorted(list(queues))
+                self.redis_service._queues_cache = result
+                self.redis_service._queues_cache_time = current_time
+                logger.info(f"Retrieved {len(result)} queues from global set")
+                return result
+
+            # Otherwise fall back to the QueueRegistry
+            from jettask.messaging.registry import QueueRegistry
+
+            queue_registry = QueueRegistry(
+                redis_client=None,
+                async_redis_client=self.redis,
+                redis_prefix=self.redis_prefix
+            )
+
+            # Get all queues
+            queues = await queue_registry.get_all_queues()
+
+            # Return the sorted queue list and update the cache
+            result = sorted(list(queues))
+            self.redis_service._queues_cache = result
+            self.redis_service._queues_cache_time = current_time
+            logger.info(f"Retrieved {len(result)} queues from registry")
+            return result
+
+        except Exception as e:
+            logger.error(f"Error getting all queues: {e}", exc_info=True)
+            return []
+
+    async def get_queue_stats(self, queue_name: str) -> Dict[str, Any]:
+        """
+        Get queue statistics (RabbitMQ-compatible format)
+
+        Args:
+            queue_name: queue name
+
+        Returns:
+            Dictionary of queue statistics
+        """
+        prefixed_queue_name = self.redis_service.get_prefixed_queue_name(queue_name)
+
+        try:
+            # Get the Stream info and consumer-group info
+            info = await self.redis.xinfo_stream(prefixed_queue_name)
+            groups = await self.redis.xinfo_groups(prefixed_queue_name)
+
+        except Exception as e:
+            # If the queue does not exist, return default values
+            logger.warning(f"Queue {queue_name} does not exist or has no groups: {e}")
+            return {
+                "queue": queue_name,
+                "messages": 0,
+                "messages_ready": 0,
+                "messages_unacknowledged": 0,
+                "consumers": 0,
+                "message_stats": {
+                    "publish": 0,
+                    "deliver_get": 0,
+                    "ack": 0
+                },
+                "consumer_groups": [],
+                "error": str(e)
+            }
+
+        # Compute the basic metrics
+        total_messages = info["length"]
+        total_pending = 0
+        total_consumers = 0
+        total_delivered = 0
+
+        consumer_groups_info = []
+
+        for group in groups:
+            group_pending = group["pending"]
+            group_consumers_count = group["consumers"]
+
+            total_pending += group_pending
+            total_consumers += group_consumers_count
+
+            group_info = {
+                "name": group["name"],
+                "consumers": group_consumers_count,
+                "pending": group_pending,
+                "last_delivered_id": group["last-delivered-id"]
+            }
+
+            # Get consumer details
+            try:
+                consumers = await self.redis.xinfo_consumers(prefixed_queue_name, group["name"])
+                group_info["consumer_details"] = consumers
+
+                # Derive the deliver count from the consumer statistics
+                for consumer in consumers:
+                    total_delivered += consumer.get("pel-count", 0)
+
+            except Exception as e:
+                group_info["consumer_details"] = []
+                logger.warning(f"Error getting consumers for group {group['name']}: {e}")
+
+            consumer_groups_info.append(group_info)
+
+        # Get more precise message statistics from the worker stats (requires the worker monitoring service)
+        try:
+            from .worker_monitor_service import WorkerMonitorService
+            worker_service = WorkerMonitorService(self.redis_service)
+            worker_summary = await worker_service.get_queue_worker_summary_fast(queue_name)
+
+            publish_count = worker_summary.get('total_count', 0)
+            deliver_count = worker_summary.get('total_success_count', 0) + worker_summary.get('total_failed_count', 0)
+            ack_count = worker_summary.get('total_success_count', 0)
+            avg_processing_time = worker_summary.get('avg_processing_time', 0.0)
+            avg_latency_time = worker_summary.get('avg_latency_time', 0.0)
+            total_running_tasks = worker_summary.get('total_running_tasks', 0)
+        except Exception as e:
+            logger.warning(f"Error getting worker summary for queue {queue_name}: {e}")
+            publish_count = 0
+            deliver_count = 0
+            ack_count = 0
+            avg_processing_time = 0.0
+            avg_latency_time = 0.0
+            total_running_tasks = 0
+
+        # Compute the ready message count (total queue length - unacknowledged messages)
+        messages_ready = max(0, total_messages - total_pending)
+
+        # RabbitMQ-style statistics
+        stats = {
+            "queue": queue_name,
+            # RabbitMQ-compatible metrics
+            "messages": total_messages,  # total number of messages in the queue
+            "messages_ready": messages_ready,  # messages in the ready state
+            "messages_unacknowledged": total_pending,  # unacknowledged messages
+            "consumers": total_consumers,  # number of consumers
+            "message_stats": {
+                "publish": publish_count,  # messages published to the queue
+                "deliver_get": deliver_count,  # messages consumed
+                "ack": ack_count  # messages acknowledged
+            },
+            # Keep the original detailed fields for compatibility
+            "length": info["length"],
+            "first_entry": info.get("first-entry"),
+            "last_entry": info.get("last-entry"),
+            "consumer_groups": consumer_groups_info,
+            # Additional performance metrics
+            "performance_stats": {
+                "avg_processing_time": avg_processing_time,
+                "avg_latency_time": avg_latency_time,
+                "total_running_tasks": total_running_tasks
+            }
+        }
+
+        logger.debug(f"Queue stats for {queue_name}: {total_messages} messages, {total_consumers} consumers")
+        return stats
+
+    async def get_stream_info(self, queue_name: str, event_id: str) -> Optional[Dict[str, Any]]:
+        """
+        Get task details from the Stream
+
+        Args:
+            queue_name: queue name
+            event_id: event ID or message ID
+
+        Returns:
+            Task info dictionary, or None if not found
+        """
+        try:
+            prefixed_queue_name = self.redis_service.get_prefixed_queue_name(queue_name)
+
+            # First try a direct lookup by event_id
+            messages = await self.redis.xrange(prefixed_queue_name, min=event_id, max=event_id, count=1)
+
+            if messages:
+                msg_id, data = messages[0]
+                logger.debug(f"Found task {event_id} in queue {queue_name}")
+                return {
+                    "message_id": msg_id,
+                    "data": data,
+                    "queue": queue_name
+                }
+
+            # If not found, event_id may be part of the message body; scan recent messages
+            messages = await self.redis.xrange(prefixed_queue_name, count=100)
+            for msg_id, data in messages:
+                if data.get("event_id") == event_id or data.get("id") == event_id:
+                    logger.debug(f"Found task {event_id} in recent messages")
+                    return {
+                        "message_id": msg_id,
+                        "data": data,
+                        "queue": queue_name
+                    }
+
+            logger.warning(f"Task {event_id} not found in queue {queue_name}")
+            return None
+
+        except Exception as e:
+            logger.error(f"Error reading from stream {prefixed_queue_name}: {e}", exc_info=True)
+            return None
+
+    async def get_queue_tasks(
+        self,
+        queue_name: str,
+        start_time: Optional[str] = None,
+        end_time: Optional[str] = None,
+        limit: int = 100
+    ) -> Dict[str, Any]:
+        """
+        Get tasks for the given queue (by time range)
+
+        Args:
+            queue_name: queue name
+            start_time: start time (Redis Stream ID format or timestamp)
+            end_time: end time (Redis Stream ID format or timestamp)
+            limit: maximum number of tasks to return
+
+        Returns:
+            Dictionary with the task list and pagination info
+        """
+        all_tasks = []
+
+        try:
+            # Normalize the time parameters
+            if not end_time:
+                end_time = '+'
+            if not start_time:
+                start_time = '-'
+
+            # Read messages from the queue's stream
+            prefixed_queue_name = self.redis_service.get_prefixed_queue_name(queue_name)
+            messages = await self.redis.xrevrange(
+                prefixed_queue_name,
+                max=end_time,
+                min=start_time,
+                count=limit
+            )
+
+            for msg_id, data in messages:
+                event_id = msg_id
+
+                # Build the task info
+                task_info = {
+                    "event_id": event_id,
+                    "message_id": msg_id,
+                    "stream_data": data,
+                    "task_name": data.get("name", "unknown"),
+                    "queue": data.get("queue", queue_name),
+                    "trigger_time": data.get("trigger_time")
+                }
+
+                # Try to parse args and kwargs
+                params_str = self._parse_task_params(data)
+                task_info["params_str"] = params_str
+
+                # Fetch information from the status key
+                status_info = await self._get_task_status(event_id, queue_name, data)
+                task_info.update(status_info)
+
+                all_tasks.append(task_info)
+
+            logger.info(f"Retrieved {len(all_tasks)} tasks from queue {queue_name}")
+
+        except Exception as e:
+            logger.error(f"Error reading queue {queue_name}: {e}", exc_info=True)
+            return {
+                "tasks": [],
+                "count": 0,
+                "oldest_id": None,
+                "newest_id": None,
+                "has_more": False,
+                "limit": limit
+            }
+
+        # Get the oldest and newest message IDs for pagination
+        oldest_id = all_tasks[-1]["message_id"] if all_tasks else None
+        newest_id = all_tasks[0]["message_id"] if all_tasks else None
+
+        return {
+            "tasks": all_tasks,
+            "count": len(all_tasks),
+            "oldest_id": oldest_id,
+            "newest_id": newest_id,
+            "has_more": len(all_tasks) >= limit,
+            "limit": limit
+        }
+
+    def _parse_task_params(self, data: Dict[str, Any]) -> str:
+        """
+        Parse task parameters
+
+        Args:
+            data: task data
+
+        Returns:
+            Parameter string
+        """
+        try:
+            args_list = []
+            kwargs_dict = {}
+
+            if data.get("args"):
+                args_list = json.loads(data["args"])
+
+            if data.get("kwargs"):
+                kwargs_dict = json.loads(data["kwargs"])
+
+            # Build the parameter string
+            params_parts = []
+            if args_list:
+                params_parts.extend([str(arg) for arg in args_list])
+            if kwargs_dict:
+                params_parts.extend([f"{k}={v}" for k, v in kwargs_dict.items()])
+
+            return ", ".join(params_parts) if params_parts else "无参数"
+
+        except Exception as e:
+            logger.warning(f"Error parsing task params: {e}")
+            return "解析失败"
+
+    async def _get_task_status(
+        self,
+        event_id: str,
+        queue_name: str,
+        data: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """
+        Get the task status
+
+        Args:
+            event_id: event ID
+            queue_name: queue name
+            data: task data
+
+        Returns:
+            Dictionary with status information
+        """
+        status_key = f"{self.redis_prefix}:STATUS:{event_id}"
+        status = await self.redis.get(status_key)
+
+        if status:
+            try:
+                parsed_status = json.loads(status)
+                return {
+                    "status": status,
+                    "parsed_status": parsed_status,
+                    "consumer": parsed_status.get("consumer", "-")
+                }
+            except Exception as e:
+                logger.warning(f"Error parsing status for task {event_id}: {e}")
+                return {
+                    "status": status,
+                    "parsed_status": {"status": "unknown"},
+                    "consumer": "-"
+                }
+        else:
+            # Build a default status
+            default_status = {
+                "status": "未知",
+                "queue": queue_name,
+                "created_at": datetime.fromtimestamp(
+                    float(data.get("trigger_time", 0))
+                ).isoformat() if data.get("trigger_time") else None
+            }
+
+            return {
+                "status": json.dumps(default_status),
+                "parsed_status": default_status,
+                "consumer": "-"
+            }
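
The new QueueMonitorService wraps queue inspection (Stream info, consumer groups, per-task status keys) around the shared RedisMonitorService that the other services added in this release also build on. A minimal usage sketch; the RedisMonitorService constructor arguments are assumptions, since that module's body is not shown in this diff:

import asyncio

import redis.asyncio as aioredis

from jettask.webui.services.redis_monitor_service import RedisMonitorService
from jettask.webui.services.queue_monitor_service import QueueMonitorService


async def main():
    client = aioredis.from_url("redis://localhost:6379/0", decode_responses=True)
    # RedisMonitorService's constructor is not shown in this diff; the arguments here are assumed.
    redis_service = RedisMonitorService(redis=client, redis_prefix="jettask")
    queue_service = QueueMonitorService(redis_service)

    for queue in await queue_service.get_all_queues():
        stats = await queue_service.get_queue_stats(queue)
        print(queue, stats["messages"], stats["messages_unacknowledged"], stats["consumers"])


asyncio.run(main())
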