jettask 0.2.20__py3-none-any.whl → 0.2.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jettask/__init__.py +4 -0
- jettask/cli.py +12 -8
- jettask/config/lua_scripts.py +37 -0
- jettask/config/nacos_config.py +1 -1
- jettask/core/app.py +313 -340
- jettask/core/container.py +4 -4
- jettask/{persistence → core}/namespace.py +93 -27
- jettask/core/task.py +16 -9
- jettask/core/unified_manager_base.py +136 -26
- jettask/db/__init__.py +67 -0
- jettask/db/base.py +137 -0
- jettask/{utils/db_connector.py → db/connector.py} +130 -26
- jettask/db/models/__init__.py +16 -0
- jettask/db/models/scheduled_task.py +196 -0
- jettask/db/models/task.py +77 -0
- jettask/db/models/task_run.py +85 -0
- jettask/executor/__init__.py +0 -15
- jettask/executor/core.py +76 -31
- jettask/executor/process_entry.py +29 -114
- jettask/executor/task_executor.py +4 -0
- jettask/messaging/event_pool.py +928 -685
- jettask/messaging/scanner.py +30 -0
- jettask/persistence/__init__.py +28 -103
- jettask/persistence/buffer.py +170 -0
- jettask/persistence/consumer.py +330 -249
- jettask/persistence/manager.py +304 -0
- jettask/persistence/persistence.py +391 -0
- jettask/scheduler/__init__.py +15 -3
- jettask/scheduler/{task_crud.py → database.py} +61 -57
- jettask/scheduler/loader.py +2 -2
- jettask/scheduler/{scheduler_coordinator.py → manager.py} +23 -6
- jettask/scheduler/models.py +14 -10
- jettask/scheduler/schedule.py +166 -0
- jettask/scheduler/scheduler.py +12 -11
- jettask/schemas/__init__.py +50 -1
- jettask/schemas/backlog.py +43 -6
- jettask/schemas/namespace.py +70 -19
- jettask/schemas/queue.py +19 -3
- jettask/schemas/responses.py +493 -0
- jettask/task/__init__.py +0 -2
- jettask/task/router.py +3 -0
- jettask/test_connection_monitor.py +1 -1
- jettask/utils/__init__.py +7 -5
- jettask/utils/db_init.py +8 -4
- jettask/utils/namespace_dep.py +167 -0
- jettask/utils/queue_matcher.py +186 -0
- jettask/utils/rate_limit/concurrency_limiter.py +7 -1
- jettask/utils/stream_backlog.py +1 -1
- jettask/webui/__init__.py +0 -1
- jettask/webui/api/__init__.py +4 -4
- jettask/webui/api/alerts.py +806 -71
- jettask/webui/api/example_refactored.py +400 -0
- jettask/webui/api/namespaces.py +390 -45
- jettask/webui/api/overview.py +300 -54
- jettask/webui/api/queues.py +971 -267
- jettask/webui/api/scheduled.py +1249 -56
- jettask/webui/api/settings.py +129 -7
- jettask/webui/api/workers.py +442 -0
- jettask/webui/app.py +46 -2329
- jettask/webui/middleware/__init__.py +6 -0
- jettask/webui/middleware/namespace_middleware.py +135 -0
- jettask/webui/services/__init__.py +146 -0
- jettask/webui/services/heartbeat_service.py +251 -0
- jettask/webui/services/overview_service.py +60 -51
- jettask/webui/services/queue_monitor_service.py +426 -0
- jettask/webui/services/redis_monitor_service.py +87 -0
- jettask/webui/services/settings_service.py +174 -111
- jettask/webui/services/task_monitor_service.py +222 -0
- jettask/webui/services/timeline_pg_service.py +452 -0
- jettask/webui/services/timeline_service.py +189 -0
- jettask/webui/services/worker_monitor_service.py +467 -0
- jettask/webui/utils/__init__.py +11 -0
- jettask/webui/utils/time_utils.py +122 -0
- jettask/worker/lifecycle.py +8 -2
- {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/METADATA +1 -1
- jettask-0.2.24.dist-info/RECORD +142 -0
- jettask/executor/executor.py +0 -338
- jettask/persistence/backlog_monitor.py +0 -567
- jettask/persistence/base.py +0 -2334
- jettask/persistence/db_manager.py +0 -516
- jettask/persistence/maintenance.py +0 -81
- jettask/persistence/message_consumer.py +0 -259
- jettask/persistence/models.py +0 -49
- jettask/persistence/offline_recovery.py +0 -196
- jettask/persistence/queue_discovery.py +0 -215
- jettask/persistence/task_persistence.py +0 -218
- jettask/persistence/task_updater.py +0 -583
- jettask/scheduler/add_execution_count.sql +0 -11
- jettask/scheduler/add_priority_field.sql +0 -26
- jettask/scheduler/add_scheduler_id.sql +0 -25
- jettask/scheduler/add_scheduler_id_index.sql +0 -10
- jettask/scheduler/make_scheduler_id_required.sql +0 -28
- jettask/scheduler/migrate_interval_seconds.sql +0 -9
- jettask/scheduler/performance_optimization.sql +0 -45
- jettask/scheduler/run_scheduler.py +0 -186
- jettask/scheduler/schema.sql +0 -84
- jettask/task/task_executor.py +0 -318
- jettask/webui/api/analytics.py +0 -323
- jettask/webui/config.py +0 -90
- jettask/webui/models/__init__.py +0 -3
- jettask/webui/models/namespace.py +0 -63
- jettask/webui/namespace_manager/__init__.py +0 -10
- jettask/webui/namespace_manager/multi.py +0 -593
- jettask/webui/namespace_manager/unified.py +0 -193
- jettask/webui/run.py +0 -46
- jettask-0.2.20.dist-info/RECORD +0 -145
- {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/WHEEL +0 -0
- {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/entry_points.txt +0 -0
- {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/licenses/LICENSE +0 -0
- {jettask-0.2.20.dist-info → jettask-0.2.24.dist-info}/top_level.txt +0 -0
@@ -11,7 +11,7 @@ from sqlalchemy import text
|
|
11
11
|
from sqlalchemy.ext.asyncio import AsyncSession
|
12
12
|
|
13
13
|
from jettask.schemas import TimeRangeQuery
|
14
|
-
from jettask.
|
14
|
+
from jettask.db.connector import get_async_redis_client, get_pg_engine_and_factory
|
15
15
|
|
16
16
|
logger = logging.getLogger(__name__)
|
17
17
|
|
@@ -36,14 +36,18 @@ class OverviewService:
|
|
36
36
|
def get_root_info() -> Dict[str, Any]:
|
37
37
|
"""
|
38
38
|
获取根路径信息
|
39
|
-
|
39
|
+
|
40
40
|
Returns:
|
41
41
|
API基本信息
|
42
42
|
"""
|
43
43
|
return {
|
44
|
-
"
|
45
|
-
"
|
46
|
-
|
44
|
+
"success": True,
|
45
|
+
"data": {
|
46
|
+
"service": "JetTask WebUI API",
|
47
|
+
"version": "v1",
|
48
|
+
"status": "running",
|
49
|
+
"timestamp": datetime.now(timezone.utc).isoformat()
|
50
|
+
}
|
47
51
|
}
|
48
52
|
|
49
53
|
@staticmethod
|
@@ -63,16 +67,16 @@ class OverviewService:
|
|
63
67
|
async def get_system_stats(namespace: str) -> Dict[str, Any]:
|
64
68
|
"""
|
65
69
|
获取指定命名空间的系统统计信息
|
66
|
-
|
70
|
+
|
67
71
|
Args:
|
68
72
|
namespace: 命名空间名称
|
69
|
-
|
73
|
+
|
70
74
|
Returns:
|
71
75
|
系统统计信息
|
72
76
|
"""
|
73
|
-
|
74
|
-
|
75
|
-
redis_client =
|
77
|
+
import os
|
78
|
+
redis_url = os.environ.get('JETTASK_REDIS_URL', 'redis://localhost:6379/0')
|
79
|
+
redis_client = get_async_redis_client(redis_url, decode_responses=True)
|
76
80
|
|
77
81
|
try:
|
78
82
|
# 统计各种类型的键
|
@@ -136,34 +140,35 @@ class OverviewService:
|
|
136
140
|
) -> Dict[str, Any]:
|
137
141
|
"""
|
138
142
|
获取仪表板统计数据
|
139
|
-
|
143
|
+
|
140
144
|
Args:
|
141
145
|
namespace: 命名空间名称
|
142
146
|
time_range: 时间范围
|
143
147
|
queues: 逗号分隔的队列名称列表
|
144
|
-
|
148
|
+
|
145
149
|
Returns:
|
146
150
|
仪表板统计数据
|
147
151
|
"""
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
if not pool.config.has_postgres():
|
152
|
+
import os
|
153
|
+
|
154
|
+
# 获取 PostgreSQL 配置
|
155
|
+
pg_url = os.environ.get('JETTASK_PG_URL')
|
156
|
+
if not pg_url:
|
154
157
|
return {
|
155
158
|
"success": True,
|
156
159
|
"data": _get_empty_dashboard_stats()
|
157
160
|
}
|
158
|
-
|
161
|
+
|
159
162
|
# 计算时间范围
|
160
163
|
end_time = datetime.now(timezone.utc)
|
161
164
|
start_time = _parse_time_range(time_range, end_time)
|
162
|
-
|
165
|
+
|
163
166
|
# 构建队列筛选条件
|
164
167
|
queue_filter, queue_list, queue_params = _build_queue_filter_and_params(queues)
|
165
|
-
|
166
|
-
|
168
|
+
|
169
|
+
# 获取数据库会话
|
170
|
+
_, session_factory = get_pg_engine_and_factory(pg_url)
|
171
|
+
async with session_factory() as session:
|
167
172
|
# 获取统计数据
|
168
173
|
stats_data = await _get_task_statistics(
|
169
174
|
session, namespace, start_time, end_time,
|
@@ -232,31 +237,31 @@ class OverviewService:
|
|
232
237
|
) -> Dict[str, Any]:
|
233
238
|
"""
|
234
239
|
获取概览页面的统一统计数据
|
235
|
-
|
240
|
+
|
236
241
|
Args:
|
237
242
|
namespace: 命名空间名称
|
238
243
|
query: 时间范围查询参数
|
239
|
-
|
244
|
+
|
240
245
|
Returns:
|
241
246
|
统一的时间序列数据
|
242
247
|
"""
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
if not pool.config.has_postgres():
|
248
|
+
import os
|
249
|
+
|
250
|
+
# 获取 PostgreSQL 配置
|
251
|
+
pg_url = os.environ.get('JETTASK_PG_URL')
|
252
|
+
if not pg_url:
|
249
253
|
return _get_empty_overview_stats()
|
250
|
-
|
254
|
+
|
251
255
|
# 解析时间范围
|
252
256
|
time_range_result = _parse_time_range_query(query)
|
253
|
-
|
257
|
+
|
254
258
|
# 构建队列筛选条件
|
255
|
-
# 使用 TimeRangeQuery 的 queues 字段(支持多队列列表)
|
256
259
|
queue_list = query.queues if hasattr(query, 'queues') and query.queues else None
|
257
260
|
queue_filter, _, queue_params = _build_queue_filter_and_params(queue_list)
|
258
|
-
|
259
|
-
|
261
|
+
|
262
|
+
# 获取数据库会话
|
263
|
+
_, session_factory = get_pg_engine_and_factory(pg_url)
|
264
|
+
async with session_factory() as session:
|
260
265
|
# 执行统一查询
|
261
266
|
result = await _execute_overview_query(
|
262
267
|
session, namespace, time_range_result,
|
@@ -531,17 +536,19 @@ async def _get_task_distribution(session, namespace, start_time, end_time, queue
|
|
531
536
|
|
532
537
|
async def _get_top_backlog_queues(namespace, limit, time_range, queues):
|
533
538
|
"""获取积压最多的队列Top N"""
|
534
|
-
|
535
|
-
|
536
|
-
|
537
|
-
|
538
|
-
if not
|
539
|
+
import os
|
540
|
+
|
541
|
+
# 获取 PostgreSQL 配置
|
542
|
+
pg_url = os.environ.get('JETTASK_PG_URL')
|
543
|
+
if not pg_url:
|
539
544
|
return {"success": True, "data": []}
|
540
|
-
|
545
|
+
|
541
546
|
end_time = datetime.now(timezone.utc)
|
542
547
|
start_time = _parse_time_range(time_range, end_time)
|
543
|
-
|
544
|
-
|
548
|
+
|
549
|
+
# 获取数据库会话
|
550
|
+
_, session_factory = get_pg_engine_and_factory(pg_url)
|
551
|
+
async with session_factory() as session:
|
545
552
|
queue_list = []
|
546
553
|
if queues:
|
547
554
|
# 如果是字符串,按逗号分割(向后兼容)
|
@@ -664,17 +671,19 @@ async def _get_top_backlog_from_tasks(session, namespace, limit):
|
|
664
671
|
|
665
672
|
async def _get_top_error_queues(namespace, limit, time_range, queues):
|
666
673
|
"""获取错误率最高的队列Top N"""
|
667
|
-
|
668
|
-
|
669
|
-
|
670
|
-
|
671
|
-
if not
|
674
|
+
import os
|
675
|
+
|
676
|
+
# 获取 PostgreSQL 配置
|
677
|
+
pg_url = os.environ.get('JETTASK_PG_URL')
|
678
|
+
if not pg_url:
|
672
679
|
return {"success": True, "data": []}
|
673
|
-
|
680
|
+
|
674
681
|
end_time = datetime.now(timezone.utc)
|
675
682
|
start_time = _parse_time_range(time_range, end_time)
|
676
|
-
|
677
|
-
|
683
|
+
|
684
|
+
# 获取数据库会话
|
685
|
+
_, session_factory = get_pg_engine_and_factory(pg_url)
|
686
|
+
async with session_factory() as session:
|
678
687
|
queue_list = []
|
679
688
|
if queues:
|
680
689
|
queue_list = [q.strip() for q in queues.split(',') if q.strip()]
|
@@ -0,0 +1,426 @@
|
|
1
|
+
"""
|
2
|
+
队列监控服务
|
3
|
+
|
4
|
+
提供队列相关的监控功能
|
5
|
+
"""
|
6
|
+
import logging
|
7
|
+
import time
|
8
|
+
import json
|
9
|
+
from typing import Dict, Any, List, Optional
|
10
|
+
from datetime import datetime
|
11
|
+
|
12
|
+
from .redis_monitor_service import RedisMonitorService
|
13
|
+
|
14
|
+
logger = logging.getLogger(__name__)
|
15
|
+
|
16
|
+
|
17
|
+
class QueueMonitorService:
    """Queue monitoring service.

    Provides queue-level monitoring on top of Redis Streams: queue discovery,
    RabbitMQ-compatible per-queue statistics, and task lookup/listing within a
    queue's stream.
    """

    def __init__(self, redis_service: "RedisMonitorService"):
        """Initialize the queue monitoring service.

        Args:
            redis_service: The base Redis monitoring service instance
                (supplies the Redis client, key prefix, and queue-list cache).
        """
        self.redis_service = redis_service

    @property
    def redis(self):
        """Async Redis client borrowed from the base monitoring service."""
        return self.redis_service.redis

    @property
    def redis_prefix(self) -> str:
        """Redis key prefix borrowed from the base monitoring service."""
        return self.redis_service.redis_prefix

    async def get_all_queues(self) -> List[str]:
        """Return all known queue names, sorted.

        Uses the TTL cache held on the base service; on a miss, reads the
        global queue set first and falls back to QueueRegistry discovery.

        Returns:
            Sorted list of queue names; empty list on any error.
        """
        try:
            # Serve from the shared cache while it is still fresh.
            current_time = time.time()
            if (self.redis_service._queues_cache is not None and
                    (current_time - self.redis_service._queues_cache_time) < self.redis_service._queues_cache_ttl):
                logger.debug("Returning cached queue list")
                return self.redis_service._queues_cache

            # Preferred source: the global queue set maintained by producers.
            global_queues_key = f'{self.redis_prefix}:global:queues'
            queues = await self.redis.smembers(global_queues_key)

            if queues:
                result = sorted(list(queues))
                self.redis_service._queues_cache = result
                self.redis_service._queues_cache_time = current_time
                logger.info(f"Retrieved {len(result)} queues from global set")
                return result

            # Fallback: discover queues through the registry.
            # Imported lazily to avoid a hard import cycle at module load.
            from jettask.messaging.registry import QueueRegistry

            queue_registry = QueueRegistry(
                redis_client=None,
                async_redis_client=self.redis,
                redis_prefix=self.redis_prefix
            )

            queues = await queue_registry.get_all_queues()

            result = sorted(list(queues))
            self.redis_service._queues_cache = result
            self.redis_service._queues_cache_time = current_time
            logger.info(f"Retrieved {len(result)} queues from registry")
            return result

        except Exception as e:
            logger.error(f"Error getting all queues: {e}", exc_info=True)
            return []

    async def get_queue_stats(self, queue_name: str) -> Dict[str, Any]:
        """Return queue statistics in a RabbitMQ-compatible shape.

        Args:
            queue_name: Logical (unprefixed) queue name.

        Returns:
            Dict with RabbitMQ-style counters (``messages``,
            ``messages_ready``, ``messages_unacknowledged``, ``consumers``,
            ``message_stats``) plus stream details and performance stats.
            If the stream or its groups cannot be read, a zeroed dict with an
            ``error`` field is returned instead of raising.
        """
        prefixed_queue_name = self.redis_service.get_prefixed_queue_name(queue_name)

        try:
            # Stream metadata and consumer-group listing.
            info = await self.redis.xinfo_stream(prefixed_queue_name)
            groups = await self.redis.xinfo_groups(prefixed_queue_name)

        except Exception as e:
            # Queue does not exist (or has no groups): return zeroed stats.
            logger.warning(f"Queue {queue_name} does not exist or has no groups: {e}")
            return {
                "queue": queue_name,
                "messages": 0,
                "messages_ready": 0,
                "messages_unacknowledged": 0,
                "consumers": 0,
                "message_stats": {
                    "publish": 0,
                    "deliver_get": 0,
                    "ack": 0
                },
                "consumer_groups": [],
                "error": str(e)
            }

        # Aggregate base metrics across consumer groups.
        total_messages = info["length"]
        total_pending = 0
        total_consumers = 0

        consumer_groups_info = []

        for group in groups:
            group_pending = group["pending"]
            group_consumers_count = group["consumers"]

            total_pending += group_pending
            total_consumers += group_consumers_count

            group_info = {
                "name": group["name"],
                "consumers": group_consumers_count,
                "pending": group_pending,
                "last_delivered_id": group["last-delivered-id"]
            }

            # Attach per-consumer details; non-fatal on failure.
            try:
                consumers = await self.redis.xinfo_consumers(prefixed_queue_name, group["name"])
                group_info["consumer_details"] = consumers
            except Exception as e:
                group_info["consumer_details"] = []
                logger.warning(f"Error getting consumers for group {group['name']}: {e}")

            consumer_groups_info.append(group_info)

        # Pull the more precise message counters from the worker stats
        # (imported lazily — sibling service, avoids import cycle).
        try:
            from .worker_monitor_service import WorkerMonitorService
            worker_service = WorkerMonitorService(self.redis_service)
            worker_summary = await worker_service.get_queue_worker_summary_fast(queue_name)

            publish_count = worker_summary.get('total_count', 0)
            deliver_count = worker_summary.get('total_success_count', 0) + worker_summary.get('total_failed_count', 0)
            ack_count = worker_summary.get('total_success_count', 0)
            avg_processing_time = worker_summary.get('avg_processing_time', 0.0)
            avg_latency_time = worker_summary.get('avg_latency_time', 0.0)
            total_running_tasks = worker_summary.get('total_running_tasks', 0)
        except Exception as e:
            logger.warning(f"Error getting worker summary for queue {queue_name}: {e}")
            publish_count = 0
            deliver_count = 0
            ack_count = 0
            avg_processing_time = 0.0
            avg_latency_time = 0.0
            total_running_tasks = 0

        # Ready = total stream length minus messages still pending ACK.
        messages_ready = max(0, total_messages - total_pending)

        # RabbitMQ-style statistics payload.
        stats = {
            "queue": queue_name,
            # RabbitMQ-compatible metrics
            "messages": total_messages,              # total messages in the queue
            "messages_ready": messages_ready,        # messages in ready state
            "messages_unacknowledged": total_pending,  # messages pending ACK
            "consumers": total_consumers,            # number of consumers
            "message_stats": {
                "publish": publish_count,            # messages published to the queue
                "deliver_get": deliver_count,        # messages consumed
                "ack": ack_count                     # messages acknowledged
            },
            # Original detailed fields kept for compatibility
            "length": info["length"],
            "first_entry": info.get("first-entry"),
            "last_entry": info.get("last-entry"),
            "consumer_groups": consumer_groups_info,
            # Extra performance metrics
            "performance_stats": {
                "avg_processing_time": avg_processing_time,
                "avg_latency_time": avg_latency_time,
                "total_running_tasks": total_running_tasks
            }
        }

        logger.debug(f"Queue stats for {queue_name}: {total_messages} messages, {total_consumers} consumers")
        return stats

    async def get_stream_info(self, queue_name: str, event_id: str) -> Optional[Dict[str, Any]]:
        """Fetch task details for an event from the queue's stream.

        Args:
            queue_name: Queue name.
            event_id: Event ID or stream message ID.

        Returns:
            Dict with ``message_id``, ``data`` and ``queue`` if found,
            otherwise ``None``.
        """
        try:
            prefixed_queue_name = self.redis_service.get_prefixed_queue_name(queue_name)

            # Fast path: treat event_id as an exact stream message ID.
            messages = await self.redis.xrange(prefixed_queue_name, min=event_id, max=event_id, count=1)

            if messages:
                msg_id, data = messages[0]
                logger.debug(f"Found task {event_id} in queue {queue_name}")
                return {
                    "message_id": msg_id,
                    "data": data,
                    "queue": queue_name
                }

            # Slow path: event_id may be embedded in message payloads;
            # scan the most recent messages for it.
            messages = await self.redis.xrange(prefixed_queue_name, count=100)
            for msg_id, data in messages:
                if data.get("event_id") == event_id or data.get("id") == event_id:
                    logger.debug(f"Found task {event_id} in recent messages")
                    return {
                        "message_id": msg_id,
                        "data": data,
                        "queue": queue_name
                    }

            logger.warning(f"Task {event_id} not found in queue {queue_name}")
            return None

        except Exception as e:
            # Log via queue_name: prefixed_queue_name may be unbound if the
            # prefix lookup itself raised before assignment.
            logger.error(f"Error reading from stream for queue {queue_name}: {e}", exc_info=True)
            return None

    async def get_queue_tasks(
        self,
        queue_name: str,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        limit: int = 100
    ) -> Dict[str, Any]:
        """List tasks in a queue within a time range (newest first).

        Args:
            queue_name: Queue name.
            start_time: Range start (Redis Stream ID format or timestamp);
                defaults to ``-`` (beginning of stream).
            end_time: Range end (Redis Stream ID format or timestamp);
                defaults to ``+`` (end of stream).
            limit: Maximum number of tasks to return.

        Returns:
            Dict with the task list and pagination info (``oldest_id`` /
            ``newest_id`` / ``has_more``). On error, an empty result dict.
        """
        all_tasks = []

        try:
            # Normalize range arguments to Redis Stream range markers.
            if not end_time:
                end_time = '+'
            if not start_time:
                start_time = '-'

            # Read newest-first from the queue's stream.
            prefixed_queue_name = self.redis_service.get_prefixed_queue_name(queue_name)
            messages = await self.redis.xrevrange(
                prefixed_queue_name,
                max=end_time,
                min=start_time,
                count=limit
            )

            for msg_id, data in messages:
                event_id = msg_id

                # Build the task info record.
                task_info = {
                    "event_id": event_id,
                    "message_id": msg_id,
                    "stream_data": data,
                    "task_name": data.get("name", "unknown"),
                    "queue": data.get("queue", queue_name),
                    "trigger_time": data.get("trigger_time")
                }

                # Human-readable args/kwargs summary.
                task_info["params_str"] = self._parse_task_params(data)

                # Merge in status info from the status key.
                status_info = await self._get_task_status(event_id, queue_name, data)
                task_info.update(status_info)

                all_tasks.append(task_info)

            logger.info(f"Retrieved {len(all_tasks)} tasks from queue {queue_name}")

        except Exception as e:
            logger.error(f"Error reading queue {queue_name}: {e}", exc_info=True)
            return {
                "tasks": [],
                "count": 0,
                "oldest_id": None,
                "newest_id": None,
                "has_more": False,
                "limit": limit
            }

        # Boundary message IDs for pagination (list is newest-first).
        oldest_id = all_tasks[-1]["message_id"] if all_tasks else None
        newest_id = all_tasks[0]["message_id"] if all_tasks else None

        return {
            "tasks": all_tasks,
            "count": len(all_tasks),
            "oldest_id": oldest_id,
            "newest_id": newest_id,
            "has_more": len(all_tasks) >= limit,
            "limit": limit
        }

    def _parse_task_params(self, data: Dict[str, Any]) -> str:
        """Render a task's args/kwargs as a display string.

        Args:
            data: Raw task payload (``args``/``kwargs`` are JSON strings).

        Returns:
            Comma-joined parameter string; "无参数" when there are none,
            "解析失败" when the JSON cannot be parsed.
        """
        try:
            args_list = []
            kwargs_dict = {}

            if data.get("args"):
                args_list = json.loads(data["args"])

            if data.get("kwargs"):
                kwargs_dict = json.loads(data["kwargs"])

            # Assemble "a, b, k=v" style display string.
            params_parts = []
            if args_list:
                params_parts.extend([str(arg) for arg in args_list])
            if kwargs_dict:
                params_parts.extend([f"{k}={v}" for k, v in kwargs_dict.items()])

            return ", ".join(params_parts) if params_parts else "无参数"

        except Exception as e:
            logger.warning(f"Error parsing task params: {e}")
            return "解析失败"

    async def _get_task_status(
        self,
        event_id: str,
        queue_name: str,
        data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Read a task's status from its status key.

        Args:
            event_id: Event ID.
            queue_name: Queue name.
            data: Raw task payload (used for the fallback status).

        Returns:
            Dict with ``status`` (raw JSON string), ``parsed_status`` and
            ``consumer``; a synthesized default when no status key exists.
        """
        status_key = f"{self.redis_prefix}:STATUS:{event_id}"
        status = await self.redis.get(status_key)

        if status:
            try:
                parsed_status = json.loads(status)
                return {
                    "status": status,
                    "parsed_status": parsed_status,
                    "consumer": parsed_status.get("consumer", "-")
                }
            except Exception as e:
                logger.warning(f"Error parsing status for task {event_id}: {e}")
                return {
                    "status": status,
                    "parsed_status": {"status": "unknown"},
                    "consumer": "-"
                }
        else:
            # No status key: synthesize a default status record.
            # NOTE(review): fromtimestamp() yields a naive local-time datetime
            # here — confirm whether trigger_time is a UTC epoch.
            default_status = {
                "status": "未知",
                "queue": queue_name,
                "created_at": datetime.fromtimestamp(
                    float(data.get("trigger_time", 0))
                ).isoformat() if data.get("trigger_time") else None
            }

            return {
                "status": json.dumps(default_status),
                "parsed_status": default_status,
                "consumer": "-"
            }