jettask 0.2.14__py3-none-any.whl → 0.2.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jettask/__init__.py +14 -35
- jettask/{webui/__main__.py → __main__.py} +4 -4
- jettask/api/__init__.py +103 -0
- jettask/api/v1/__init__.py +29 -0
- jettask/api/v1/alerts.py +226 -0
- jettask/api/v1/analytics.py +323 -0
- jettask/api/v1/namespaces.py +134 -0
- jettask/api/v1/overview.py +136 -0
- jettask/api/v1/queues.py +530 -0
- jettask/api/v1/scheduled.py +420 -0
- jettask/api/v1/settings.py +44 -0
- jettask/{webui/api.py → api.py} +4 -46
- jettask/{webui/backend → backend}/main.py +21 -109
- jettask/{webui/backend → backend}/main_unified.py +1 -1
- jettask/{webui/backend → backend}/namespace_api_old.py +3 -30
- jettask/{webui/backend → backend}/namespace_data_access.py +2 -1
- jettask/{webui/backend → backend}/unified_api_router.py +14 -74
- jettask/{core/cli.py → cli.py} +106 -26
- jettask/config/nacos_config.py +386 -0
- jettask/core/app.py +8 -100
- jettask/core/db_manager.py +515 -0
- jettask/core/event_pool.py +5 -2
- jettask/core/unified_manager_base.py +47 -14
- jettask/{webui/db_init.py → db_init.py} +1 -1
- jettask/executors/asyncio.py +2 -2
- jettask/{webui/integrated_gradio_app.py → integrated_gradio_app.py} +1 -1
- jettask/{webui/multi_namespace_consumer.py → multi_namespace_consumer.py} +5 -2
- jettask/{webui/pg_consumer.py → pg_consumer.py} +137 -69
- jettask/{webui/run.py → run.py} +1 -1
- jettask/{webui/run_webui.py → run_webui.py} +4 -4
- jettask/scheduler/multi_namespace_scheduler.py +2 -2
- jettask/scheduler/unified_manager.py +5 -5
- jettask/scheduler/unified_scheduler_manager.py +1 -1
- jettask/schemas/__init__.py +166 -0
- jettask/schemas/alert.py +99 -0
- jettask/schemas/backlog.py +122 -0
- jettask/schemas/common.py +139 -0
- jettask/schemas/monitoring.py +181 -0
- jettask/schemas/namespace.py +168 -0
- jettask/schemas/queue.py +83 -0
- jettask/schemas/scheduled_task.py +128 -0
- jettask/schemas/task.py +70 -0
- jettask/services/__init__.py +24 -0
- jettask/services/alert_service.py +454 -0
- jettask/services/analytics_service.py +46 -0
- jettask/services/overview_service.py +978 -0
- jettask/services/queue_service.py +711 -0
- jettask/services/redis_monitor_service.py +151 -0
- jettask/services/scheduled_task_service.py +207 -0
- jettask/services/settings_service.py +758 -0
- jettask/services/task_service.py +157 -0
- jettask/{webui/task_center.py → task_center.py} +30 -8
- jettask/{webui/task_center_client.py → task_center_client.py} +1 -1
- jettask/{webui/config.py → webui_config.py} +6 -1
- jettask/webui_exceptions.py +67 -0
- jettask/webui_sql/verify_database.sql +72 -0
- {jettask-0.2.14.dist-info → jettask-0.2.16.dist-info}/METADATA +3 -1
- jettask-0.2.16.dist-info/RECORD +150 -0
- {jettask-0.2.14.dist-info → jettask-0.2.16.dist-info}/entry_points.txt +1 -1
- jettask/webui/backend/data_api.py +0 -3294
- jettask/webui/backend/namespace_api.py +0 -295
- jettask/webui/backend/queue_backlog_api.py +0 -727
- jettask/webui/backend/redis_monitor_api.py +0 -476
- jettask/webui/frontend/index.html +0 -13
- jettask/webui/frontend/package.json +0 -30
- jettask/webui/frontend/src/App.css +0 -109
- jettask/webui/frontend/src/App.jsx +0 -66
- jettask/webui/frontend/src/components/NamespaceSelector.jsx +0 -166
- jettask/webui/frontend/src/components/QueueBacklogChart.jsx +0 -298
- jettask/webui/frontend/src/components/QueueBacklogTrend.jsx +0 -638
- jettask/webui/frontend/src/components/QueueDetailsTable.css +0 -65
- jettask/webui/frontend/src/components/QueueDetailsTable.jsx +0 -487
- jettask/webui/frontend/src/components/QueueDetailsTableV2.jsx +0 -465
- jettask/webui/frontend/src/components/ScheduledTaskFilter.jsx +0 -423
- jettask/webui/frontend/src/components/TaskFilter.jsx +0 -425
- jettask/webui/frontend/src/components/TimeRangeSelector.css +0 -21
- jettask/webui/frontend/src/components/TimeRangeSelector.jsx +0 -160
- jettask/webui/frontend/src/components/charts/QueueChart.jsx +0 -111
- jettask/webui/frontend/src/components/charts/QueueTrendChart.jsx +0 -115
- jettask/webui/frontend/src/components/charts/WorkerChart.jsx +0 -40
- jettask/webui/frontend/src/components/common/StatsCard.jsx +0 -18
- jettask/webui/frontend/src/components/layout/AppLayout.css +0 -95
- jettask/webui/frontend/src/components/layout/AppLayout.jsx +0 -49
- jettask/webui/frontend/src/components/layout/Header.css +0 -106
- jettask/webui/frontend/src/components/layout/Header.jsx +0 -106
- jettask/webui/frontend/src/components/layout/SideMenu.css +0 -137
- jettask/webui/frontend/src/components/layout/SideMenu.jsx +0 -209
- jettask/webui/frontend/src/components/layout/TabsNav.css +0 -244
- jettask/webui/frontend/src/components/layout/TabsNav.jsx +0 -206
- jettask/webui/frontend/src/components/layout/UserInfo.css +0 -197
- jettask/webui/frontend/src/components/layout/UserInfo.jsx +0 -197
- jettask/webui/frontend/src/contexts/LoadingContext.jsx +0 -27
- jettask/webui/frontend/src/contexts/NamespaceContext.jsx +0 -72
- jettask/webui/frontend/src/contexts/TabsContext.backup.jsx +0 -245
- jettask/webui/frontend/src/index.css +0 -114
- jettask/webui/frontend/src/main.jsx +0 -22
- jettask/webui/frontend/src/pages/Alerts.jsx +0 -684
- jettask/webui/frontend/src/pages/Dashboard/index.css +0 -35
- jettask/webui/frontend/src/pages/Dashboard/index.jsx +0 -281
- jettask/webui/frontend/src/pages/Dashboard.jsx +0 -1330
- jettask/webui/frontend/src/pages/QueueDetail.jsx +0 -1117
- jettask/webui/frontend/src/pages/QueueMonitor.jsx +0 -527
- jettask/webui/frontend/src/pages/Queues.jsx +0 -12
- jettask/webui/frontend/src/pages/ScheduledTasks.jsx +0 -810
- jettask/webui/frontend/src/pages/Settings.jsx +0 -801
- jettask/webui/frontend/src/pages/Workers.jsx +0 -12
- jettask/webui/frontend/src/services/api.js +0 -159
- jettask/webui/frontend/src/services/queueTrend.js +0 -166
- jettask/webui/frontend/src/utils/suppressWarnings.js +0 -22
- jettask/webui/frontend/src/utils/userPreferences.js +0 -154
- jettask/webui/frontend/vite.config.js +0 -26
- jettask/webui/sql/init_database.sql +0 -640
- jettask-0.2.14.dist-info/RECORD +0 -172
- /jettask/{webui/backend → backend}/__init__.py +0 -0
- /jettask/{webui/backend → backend}/api/__init__.py +0 -0
- /jettask/{webui/backend → backend}/api/v1/__init__.py +0 -0
- /jettask/{webui/backend → backend}/api/v1/monitoring.py +0 -0
- /jettask/{webui/backend → backend}/api/v1/namespaces.py +0 -0
- /jettask/{webui/backend → backend}/api/v1/queues.py +0 -0
- /jettask/{webui/backend → backend}/api/v1/tasks.py +0 -0
- /jettask/{webui/backend → backend}/config.py +0 -0
- /jettask/{webui/backend → backend}/core/__init__.py +0 -0
- /jettask/{webui/backend → backend}/core/cache.py +0 -0
- /jettask/{webui/backend → backend}/core/database.py +0 -0
- /jettask/{webui/backend → backend}/core/exceptions.py +0 -0
- /jettask/{webui/backend → backend}/data_access.py +0 -0
- /jettask/{webui/backend → backend}/dependencies.py +0 -0
- /jettask/{webui/backend → backend}/init_meta_db.py +0 -0
- /jettask/{webui/backend → backend}/main_v2.py +0 -0
- /jettask/{webui/backend → backend}/models/__init__.py +0 -0
- /jettask/{webui/backend → backend}/models/requests.py +0 -0
- /jettask/{webui/backend → backend}/models/responses.py +0 -0
- /jettask/{webui/backend → backend}/queue_stats_v2.py +0 -0
- /jettask/{webui/backend → backend}/services/__init__.py +0 -0
- /jettask/{webui/backend → backend}/start.py +0 -0
- /jettask/{webui/cleanup_deprecated_tables.sql → cleanup_deprecated_tables.sql} +0 -0
- /jettask/{webui/gradio_app.py → gradio_app.py} +0 -0
- /jettask/{webui/__init__.py → main.py} +0 -0
- /jettask/{webui/models.py → models.py} +0 -0
- /jettask/{webui/run_monitor.py → run_monitor.py} +0 -0
- /jettask/{webui/schema.sql → schema.sql} +0 -0
- /jettask/{webui/unified_consumer_manager.py → unified_consumer_manager.py} +0 -0
- /jettask/{webui/models → webui_models}/__init__.py +0 -0
- /jettask/{webui/models → webui_models}/namespace.py +0 -0
- /jettask/{webui/sql → webui_sql}/batch_upsert_functions.sql +0 -0
- {jettask-0.2.14.dist-info → jettask-0.2.16.dist-info}/WHEEL +0 -0
- {jettask-0.2.14.dist-info → jettask-0.2.16.dist-info}/licenses/LICENSE +0 -0
- {jettask-0.2.14.dist-info → jettask-0.2.16.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,711 @@
|
|
|
1
|
+
"""
|
|
2
|
+
队列服务层
|
|
3
|
+
处理队列相关的业务逻辑
|
|
4
|
+
"""
|
|
5
|
+
from typing import Optional, List, Dict, Any
|
|
6
|
+
from datetime import datetime, timedelta, timezone
|
|
7
|
+
from sqlalchemy import text
|
|
8
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
9
|
+
import logging
|
|
10
|
+
import traceback
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class QueueService:
|
|
16
|
+
"""队列服务类"""
|
|
17
|
+
|
|
18
|
+
@staticmethod
|
|
19
|
+
def get_base_queue_name(queue_name: str) -> str:
|
|
20
|
+
"""
|
|
21
|
+
提取基础队列名(去除优先级后缀)
|
|
22
|
+
|
|
23
|
+
Args:
|
|
24
|
+
queue_name: 完整队列名
|
|
25
|
+
|
|
26
|
+
Returns:
|
|
27
|
+
基础队列名
|
|
28
|
+
"""
|
|
29
|
+
if ':' in queue_name:
|
|
30
|
+
parts = queue_name.rsplit(':', 1)
|
|
31
|
+
if parts[-1].isdigit():
|
|
32
|
+
return parts[0]
|
|
33
|
+
return queue_name
|
|
34
|
+
|
|
35
|
+
@staticmethod
|
|
36
|
+
async def get_queues_by_namespace(namespace_data_access, namespace: str) -> Dict[str, Any]:
|
|
37
|
+
"""
|
|
38
|
+
获取指定命名空间的队列列表
|
|
39
|
+
|
|
40
|
+
Args:
|
|
41
|
+
namespace_data_access: 命名空间数据访问实例
|
|
42
|
+
namespace: 命名空间
|
|
43
|
+
|
|
44
|
+
Returns:
|
|
45
|
+
队列列表
|
|
46
|
+
"""
|
|
47
|
+
queues_data = await namespace_data_access.get_queue_stats(namespace)
|
|
48
|
+
return {
|
|
49
|
+
"success": True,
|
|
50
|
+
"data": list(set([QueueService.get_base_queue_name(q['queue_name']) for q in queues_data]))
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
@staticmethod
|
|
54
|
+
async def get_queue_flow_rates(data_access, query) -> Dict[str, Any]:
|
|
55
|
+
"""
|
|
56
|
+
获取单个队列的流量速率(入队、开始执行、完成)
|
|
57
|
+
|
|
58
|
+
Args:
|
|
59
|
+
data_access: 数据访问层实例
|
|
60
|
+
query: 时间范围查询对象
|
|
61
|
+
|
|
62
|
+
Returns:
|
|
63
|
+
队列流量速率数据
|
|
64
|
+
"""
|
|
65
|
+
# 处理时间范围
|
|
66
|
+
now = datetime.now(timezone.utc)
|
|
67
|
+
|
|
68
|
+
if query.start_time and query.end_time:
|
|
69
|
+
# 使用提供的时间范围
|
|
70
|
+
start_time = query.start_time
|
|
71
|
+
end_time = query.end_time
|
|
72
|
+
logger.info(f"使用自定义时间范围: {start_time} 到 {end_time}")
|
|
73
|
+
else:
|
|
74
|
+
# 根据time_range参数计算时间范围
|
|
75
|
+
time_range_map = {
|
|
76
|
+
"15m": timedelta(minutes=15),
|
|
77
|
+
"30m": timedelta(minutes=30),
|
|
78
|
+
"1h": timedelta(hours=1),
|
|
79
|
+
"3h": timedelta(hours=3),
|
|
80
|
+
"6h": timedelta(hours=6),
|
|
81
|
+
"12h": timedelta(hours=12),
|
|
82
|
+
"24h": timedelta(hours=24),
|
|
83
|
+
"7d": timedelta(days=7),
|
|
84
|
+
"30d": timedelta(days=30),
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
# 优先使用 time_range,如果没有则使用 interval
|
|
88
|
+
time_range_value = query.time_range if query.time_range else query.interval
|
|
89
|
+
delta = time_range_map.get(time_range_value, timedelta(minutes=15))
|
|
90
|
+
|
|
91
|
+
# 获取队列的最新任务时间,确保图表包含最新数据
|
|
92
|
+
queue_name = query.queues[0] if query.queues else None
|
|
93
|
+
if queue_name:
|
|
94
|
+
latest_time = await data_access.get_latest_task_time(queue_name)
|
|
95
|
+
if latest_time:
|
|
96
|
+
# 使用最新任务时间作为结束时间
|
|
97
|
+
end_time = latest_time.replace(second=59, microsecond=999999) # 包含整分钟
|
|
98
|
+
logger.info(f"使用最新任务时间: {latest_time}")
|
|
99
|
+
else:
|
|
100
|
+
# 如果没有任务,使用当前时间
|
|
101
|
+
end_time = now.replace(second=0, microsecond=0)
|
|
102
|
+
else:
|
|
103
|
+
end_time = now.replace(second=0, microsecond=0)
|
|
104
|
+
|
|
105
|
+
start_time = end_time - delta
|
|
106
|
+
logger.info(f"使用预设时间范围 {time_range_value}: {start_time} 到 {end_time}, delta: {delta}")
|
|
107
|
+
|
|
108
|
+
# 确保有队列名称
|
|
109
|
+
if not query.queues or len(query.queues) == 0:
|
|
110
|
+
return {"data": [], "granularity": "minute"}
|
|
111
|
+
|
|
112
|
+
# 获取第一个队列的流量速率
|
|
113
|
+
queue_name = query.queues[0]
|
|
114
|
+
# TimeRangeQuery 没有 filters 属性,传递 None 或空字典
|
|
115
|
+
filters = getattr(query, 'filters', None)
|
|
116
|
+
data, granularity = await data_access.fetch_queue_flow_rates(
|
|
117
|
+
queue_name, start_time, end_time, filters
|
|
118
|
+
)
|
|
119
|
+
|
|
120
|
+
return {"data": data, "granularity": granularity}
|
|
121
|
+
|
|
122
|
+
@staticmethod
|
|
123
|
+
async def get_global_stats(data_access) -> Dict[str, Any]:
|
|
124
|
+
"""
|
|
125
|
+
获取全局统计信息
|
|
126
|
+
|
|
127
|
+
Args:
|
|
128
|
+
data_access: 数据访问层实例
|
|
129
|
+
|
|
130
|
+
Returns:
|
|
131
|
+
全局统计数据
|
|
132
|
+
"""
|
|
133
|
+
stats_data = await data_access.fetch_global_stats()
|
|
134
|
+
return {
|
|
135
|
+
"success": True,
|
|
136
|
+
"data": stats_data
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
@staticmethod
|
|
140
|
+
async def get_queues_detail(data_access) -> Dict[str, Any]:
|
|
141
|
+
"""
|
|
142
|
+
获取队列详细信息
|
|
143
|
+
|
|
144
|
+
Args:
|
|
145
|
+
data_access: 数据访问层实例
|
|
146
|
+
|
|
147
|
+
Returns:
|
|
148
|
+
队列详细数据
|
|
149
|
+
"""
|
|
150
|
+
queues_data = await data_access.fetch_queues_data()
|
|
151
|
+
return {
|
|
152
|
+
"success": True,
|
|
153
|
+
"data": queues_data
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
@staticmethod
|
|
157
|
+
async def delete_queue(queue_name: str) -> Dict[str, Any]:
|
|
158
|
+
"""
|
|
159
|
+
删除队列
|
|
160
|
+
|
|
161
|
+
Args:
|
|
162
|
+
queue_name: 队列名称
|
|
163
|
+
|
|
164
|
+
Returns:
|
|
165
|
+
操作结果
|
|
166
|
+
"""
|
|
167
|
+
# TODO: 实现删除队列的逻辑
|
|
168
|
+
logger.info(f"删除队列请求: {queue_name}")
|
|
169
|
+
return {
|
|
170
|
+
"success": True,
|
|
171
|
+
"message": f"队列 {queue_name} 已删除"
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
@staticmethod
|
|
175
|
+
async def trim_queue(queue_name: str, max_length: int) -> Dict[str, Any]:
|
|
176
|
+
"""
|
|
177
|
+
裁剪队列到指定长度
|
|
178
|
+
|
|
179
|
+
Args:
|
|
180
|
+
queue_name: 队列名称
|
|
181
|
+
max_length: 最大长度
|
|
182
|
+
|
|
183
|
+
Returns:
|
|
184
|
+
操作结果
|
|
185
|
+
"""
|
|
186
|
+
# TODO: 实现裁剪队列的逻辑
|
|
187
|
+
logger.info(f"裁剪队列请求: {queue_name}, 保留 {max_length} 条消息")
|
|
188
|
+
return {
|
|
189
|
+
"success": True,
|
|
190
|
+
"message": f"队列 {queue_name} 已裁剪至 {max_length} 条消息"
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
    @staticmethod
    async def get_queue_stats_v2(
        namespace_data_access,
        namespace: str,
        queue: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        time_range: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Get queue statistics (v2) - supports consumer-group details and priority queues.

        Args:
            namespace_data_access: Namespace data-access instance.
            namespace: Namespace to query.
            queue: Optional queue name to filter the results by.
            start_time: Explicit window start (used when time_range is absent or 'custom').
            end_time: Explicit window end (used when time_range is absent or 'custom').
            time_range: Preset window such as '15m', '6h', '7d', or 'custom'.

        Returns:
            Queue statistics payload: {"success": True, "data": [...]}.
        """
        # Resolve the per-namespace connection bundle (Redis + optional PG).
        conn = await namespace_data_access.manager.get_connection(namespace)

        # Raw (non-decoding) Redis client; always closed in the finally block below.
        redis_client = await conn.get_redis_client(decode=False)

        # PostgreSQL session is optional - only opened when configured.
        pg_session = None
        if conn.AsyncSessionLocal:
            pg_session = conn.AsyncSessionLocal()

        try:
            # Imported lazily so the module does not hard-depend on the
            # backend package at import time.
            from jettask.backend.queue_stats_v2 import QueueStatsV2

            # Build the statistics service against this namespace's clients.
            stats_service = QueueStatsV2(
                redis_client=redis_client,
                pg_session=pg_session,
                redis_prefix=conn.redis_prefix
            )

            # Build the optional time filter from either the preset range or
            # the explicit start/end pair; stays None when nothing was given.
            time_filter = None
            if time_range or start_time or end_time:
                time_filter = {}

                # A preset such as '15m'/'6h'/'7d' becomes a [now - delta, now]
                # window; 'custom' falls through to the explicit-bounds branch.
                if time_range and time_range != 'custom':
                    now = datetime.now(timezone.utc)
                    if time_range.endswith('m'):
                        minutes = int(time_range[:-1])
                        time_filter['start_time'] = now - timedelta(minutes=minutes)
                        time_filter['end_time'] = now
                    elif time_range.endswith('h'):
                        hours = int(time_range[:-1])
                        time_filter['start_time'] = now - timedelta(hours=hours)
                        time_filter['end_time'] = now
                    elif time_range.endswith('d'):
                        days = int(time_range[:-1])
                        time_filter['start_time'] = now - timedelta(days=days)
                        time_filter['end_time'] = now
                else:
                    # Use the explicitly provided bounds (either may be absent,
                    # leaving time_filter a partially-filled or empty dict).
                    if start_time:
                        time_filter['start_time'] = start_time
                    if end_time:
                        time_filter['end_time'] = end_time

            # Fetch the statistics in grouped (per base queue) form.
            stats = await stats_service.get_queue_stats_grouped(time_filter)

            # Optional post-filter on a single queue name.
            if queue:
                stats = [s for s in stats if s['queue_name'] == queue]

            return {
                "success": True,
                "data": stats
            }

        finally:
            # Always release the DB session and the Redis connection.
            if pg_session:
                await pg_session.close()
            await redis_client.aclose()
|
|
281
|
+
|
|
282
|
+
@staticmethod
|
|
283
|
+
async def get_tasks_v2(namespace_data_access, namespace: str, body: Dict[str, Any]) -> Dict[str, Any]:
|
|
284
|
+
"""
|
|
285
|
+
获取任务列表v2 - 支持tasks和task_runs表连表查询
|
|
286
|
+
|
|
287
|
+
Args:
|
|
288
|
+
namespace_data_access: 命名空间数据访问实例
|
|
289
|
+
namespace: 命名空间
|
|
290
|
+
body: 请求体参数
|
|
291
|
+
|
|
292
|
+
Returns:
|
|
293
|
+
任务列表数据
|
|
294
|
+
"""
|
|
295
|
+
from sqlalchemy import text
|
|
296
|
+
from datetime import datetime, timezone, timedelta
|
|
297
|
+
|
|
298
|
+
queue_name = body.get('queue_name')
|
|
299
|
+
page = body.get('page', 1)
|
|
300
|
+
page_size = body.get('page_size', 20)
|
|
301
|
+
filters = body.get('filters', [])
|
|
302
|
+
time_range = body.get('time_range', '1h')
|
|
303
|
+
start_time = body.get('start_time')
|
|
304
|
+
end_time = body.get('end_time')
|
|
305
|
+
sort_field = body.get('sort_field', 'created_at')
|
|
306
|
+
sort_order = body.get('sort_order', 'desc')
|
|
307
|
+
|
|
308
|
+
if not queue_name:
|
|
309
|
+
raise ValueError("queue_name is required")
|
|
310
|
+
|
|
311
|
+
# 获取命名空间连接
|
|
312
|
+
conn = await namespace_data_access.manager.get_connection(namespace)
|
|
313
|
+
|
|
314
|
+
if not conn.pg_config or not conn.async_engine:
|
|
315
|
+
return {
|
|
316
|
+
"success": True,
|
|
317
|
+
"data": [],
|
|
318
|
+
"total": 0
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
# 解析时间范围
|
|
322
|
+
if start_time and end_time:
|
|
323
|
+
# 使用自定义时间范围
|
|
324
|
+
start_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00'))
|
|
325
|
+
end_dt = datetime.fromisoformat(end_time.replace('Z', '+00:00'))
|
|
326
|
+
else:
|
|
327
|
+
# 使用预定义时间范围
|
|
328
|
+
end_dt = datetime.now(timezone.utc)
|
|
329
|
+
time_deltas = {
|
|
330
|
+
'15m': timedelta(minutes=15),
|
|
331
|
+
'30m': timedelta(minutes=30),
|
|
332
|
+
'1h': timedelta(hours=1),
|
|
333
|
+
'3h': timedelta(hours=3),
|
|
334
|
+
'6h': timedelta(hours=6),
|
|
335
|
+
'12h': timedelta(hours=12),
|
|
336
|
+
'1d': timedelta(days=1),
|
|
337
|
+
'3d': timedelta(days=3),
|
|
338
|
+
'7d': timedelta(days=7),
|
|
339
|
+
'30d': timedelta(days=30)
|
|
340
|
+
}
|
|
341
|
+
delta = time_deltas.get(time_range, timedelta(hours=1))
|
|
342
|
+
start_dt = end_dt - delta
|
|
343
|
+
|
|
344
|
+
offset = (page - 1) * page_size
|
|
345
|
+
|
|
346
|
+
async with conn.async_engine.begin() as pg_conn:
|
|
347
|
+
# 构建查询条件
|
|
348
|
+
conditions = [
|
|
349
|
+
"t.namespace = :namespace",
|
|
350
|
+
"t.queue = :queue",
|
|
351
|
+
"t.created_at >= :start_time",
|
|
352
|
+
"t.created_at <= :end_time"
|
|
353
|
+
]
|
|
354
|
+
query_params = {
|
|
355
|
+
"namespace": namespace,
|
|
356
|
+
"queue": queue_name,
|
|
357
|
+
"start_time": start_dt,
|
|
358
|
+
"end_time": end_dt,
|
|
359
|
+
"limit": page_size,
|
|
360
|
+
"offset": offset
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
# 处理筛选条件
|
|
364
|
+
for i, filter_item in enumerate(filters):
|
|
365
|
+
field = filter_item.get('field')
|
|
366
|
+
operator = filter_item.get('operator')
|
|
367
|
+
value = filter_item.get('value')
|
|
368
|
+
|
|
369
|
+
if field and operator and value is not None:
|
|
370
|
+
param_key = f"filter_{i}"
|
|
371
|
+
|
|
372
|
+
# 映射前端字段到数据库字段(使用payload JSONB列)
|
|
373
|
+
db_field_map = {
|
|
374
|
+
'id': 't.stream_id',
|
|
375
|
+
'task_name': "t.payload::jsonb->'event_data'->>'__task_name'",
|
|
376
|
+
'status': "t.payload::jsonb->>'status'",
|
|
377
|
+
'worker_id': "t.payload::jsonb->>'worker_id'",
|
|
378
|
+
'scheduled_task_id': 't.scheduled_task_id'
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
db_field = db_field_map.get(field, f't.{field}')
|
|
382
|
+
|
|
383
|
+
if operator == 'eq':
|
|
384
|
+
conditions.append(f"{db_field} = :{param_key}")
|
|
385
|
+
query_params[param_key] = value
|
|
386
|
+
elif operator == 'contains':
|
|
387
|
+
conditions.append(f"{db_field} LIKE :{param_key}")
|
|
388
|
+
query_params[param_key] = f"%{value}%"
|
|
389
|
+
elif operator == 'gt':
|
|
390
|
+
conditions.append(f"{db_field} > :{param_key}")
|
|
391
|
+
query_params[param_key] = value
|
|
392
|
+
elif operator == 'lt':
|
|
393
|
+
conditions.append(f"{db_field} < :{param_key}")
|
|
394
|
+
query_params[param_key] = value
|
|
395
|
+
|
|
396
|
+
where_clause = " AND ".join(conditions)
|
|
397
|
+
|
|
398
|
+
# 获取总数
|
|
399
|
+
count_query = f"""
|
|
400
|
+
SELECT COUNT(*) as total
|
|
401
|
+
FROM tasks t
|
|
402
|
+
WHERE {where_clause}
|
|
403
|
+
"""
|
|
404
|
+
count_result = await pg_conn.execute(text(count_query), query_params)
|
|
405
|
+
total = count_result.fetchone().total
|
|
406
|
+
|
|
407
|
+
# 构建排序
|
|
408
|
+
sort_map = {
|
|
409
|
+
'created_at': 't.created_at',
|
|
410
|
+
'started_at': 't.started_at',
|
|
411
|
+
'completed_at': 't.completed_at'
|
|
412
|
+
}
|
|
413
|
+
order_by = sort_map.get(sort_field, 't.created_at')
|
|
414
|
+
order_direction = 'DESC' if sort_order == 'desc' else 'ASC'
|
|
415
|
+
|
|
416
|
+
# 获取任务列表(从payload JSONB中提取数据)
|
|
417
|
+
query = f"""
|
|
418
|
+
SELECT
|
|
419
|
+
t.stream_id as id,
|
|
420
|
+
t.payload::jsonb->'event_data'->>'__task_name' as task_name,
|
|
421
|
+
t.queue,
|
|
422
|
+
t.payload::jsonb->>'status' as status,
|
|
423
|
+
t.priority,
|
|
424
|
+
COALESCE((t.payload::jsonb->>'retry_count')::int, 0) as retry_count,
|
|
425
|
+
COALESCE((t.payload::jsonb->>'max_retry')::int, 3) as max_retry,
|
|
426
|
+
t.created_at,
|
|
427
|
+
(t.payload::jsonb->>'started_at')::timestamptz as started_at,
|
|
428
|
+
(t.payload::jsonb->>'completed_at')::timestamptz as completed_at,
|
|
429
|
+
t.payload::jsonb->>'worker_id' as worker_id,
|
|
430
|
+
t.payload::jsonb->>'error_message' as error_message,
|
|
431
|
+
(t.payload::jsonb->>'execution_time')::float as execution_time,
|
|
432
|
+
CASE
|
|
433
|
+
WHEN t.payload::jsonb->>'completed_at' IS NOT NULL AND t.created_at IS NOT NULL
|
|
434
|
+
THEN EXTRACT(EPOCH FROM ((t.payload::jsonb->>'completed_at')::timestamptz - t.created_at))
|
|
435
|
+
ELSE NULL
|
|
436
|
+
END as duration,
|
|
437
|
+
t.scheduled_task_id,
|
|
438
|
+
t.source,
|
|
439
|
+
t.metadata
|
|
440
|
+
FROM tasks t
|
|
441
|
+
WHERE {where_clause}
|
|
442
|
+
ORDER BY {order_by} {order_direction}
|
|
443
|
+
LIMIT :limit OFFSET :offset
|
|
444
|
+
"""
|
|
445
|
+
|
|
446
|
+
result = await pg_conn.execute(text(query), query_params)
|
|
447
|
+
|
|
448
|
+
tasks = []
|
|
449
|
+
for row in result:
|
|
450
|
+
tasks.append({
|
|
451
|
+
"id": row.id,
|
|
452
|
+
"task_name": row.task_name or "unknown",
|
|
453
|
+
"queue": row.queue,
|
|
454
|
+
"status": row.status,
|
|
455
|
+
"priority": row.priority,
|
|
456
|
+
"retry_count": row.retry_count,
|
|
457
|
+
"max_retry": row.max_retry,
|
|
458
|
+
"created_at": row.created_at.isoformat() if row.created_at else None,
|
|
459
|
+
"started_at": row.started_at.isoformat() if row.started_at else None,
|
|
460
|
+
"completed_at": row.completed_at.isoformat() if row.completed_at else None,
|
|
461
|
+
"duration": round(row.duration, 2) if row.duration else None,
|
|
462
|
+
"execution_time": float(row.execution_time) if row.execution_time else None,
|
|
463
|
+
"worker_id": row.worker_id,
|
|
464
|
+
"error_message": row.error_message
|
|
465
|
+
})
|
|
466
|
+
|
|
467
|
+
return {
|
|
468
|
+
"success": True,
|
|
469
|
+
"data": tasks,
|
|
470
|
+
"total": total
|
|
471
|
+
}
|
|
472
|
+
|
|
473
|
+
    @staticmethod
    async def get_consumer_group_stats(namespace_data_access, namespace: str, group_name: str) -> Dict[str, Any]:
        """
        Get detailed statistics for a specific consumer group (last 24 hours).

        Args:
            namespace_data_access: Namespace data-access instance.
            namespace: Namespace to query.
            group_name: Consumer-group name.

        Returns:
            {"success": True, "data": {"group_name", "summary", "hourly_trend"}}.

        Raises:
            ValueError: If the namespace has no PostgreSQL configured.
        """
        # Resolve the per-namespace connection bundle.
        conn = await namespace_data_access.manager.get_connection(namespace)

        # Statistics live in PostgreSQL; bail out if it is not configured.
        if not conn.AsyncSessionLocal:
            raise ValueError("PostgreSQL not configured for this namespace")

        async with conn.AsyncSessionLocal() as session:
            # Aggregate execution stats plus an hourly trend in one round trip.
            # NOTE(review): group_stats is grouped by (consumer_group, task_name),
            # so the scalar subquery `(SELECT row_to_json(gs) FROM group_stats gs)`
            # returns one row ONLY when the group ran a single task name; with
            # multiple task names PostgreSQL raises "more than one row returned
            # by a subquery" - confirm upstream guarantees one task per group.
            query = text("""
                WITH group_stats AS (
                    SELECT
                        tr.consumer_group,
                        tr.task_name,
                        COUNT(*) as total_tasks,
                        COUNT(CASE WHEN tr.status = 'success' THEN 1 END) as success_count,
                        COUNT(CASE WHEN tr.status = 'failed' THEN 1 END) as failed_count,
                        COUNT(CASE WHEN tr.status = 'running' THEN 1 END) as running_count,
                        AVG(tr.execution_time) as avg_execution_time,
                        MIN(tr.execution_time) as min_execution_time,
                        MAX(tr.execution_time) as max_execution_time,
                        AVG(tr.duration) as avg_duration,
                        MIN(tr.started_at) as first_task_time,
                        MAX(tr.completed_at) as last_task_time
                    FROM task_runs tr
                    WHERE tr.consumer_group = :group_name
                        AND tr.started_at > NOW() - INTERVAL '24 hours'
                    GROUP BY tr.consumer_group, tr.task_name
                ),
                hourly_stats AS (
                    SELECT
                        DATE_TRUNC('hour', tr.started_at) as hour,
                        COUNT(*) as task_count,
                        AVG(tr.execution_time) as avg_exec_time
                    FROM task_runs tr
                    WHERE tr.consumer_group = :group_name
                        AND tr.started_at > NOW() - INTERVAL '24 hours'
                    GROUP BY DATE_TRUNC('hour', tr.started_at)
                    ORDER BY hour
                )
                SELECT
                    (SELECT row_to_json(gs) FROM group_stats gs) as summary,
                    (SELECT json_agg(hs) FROM hourly_stats hs) as hourly_trend
            """)

            result = await session.execute(query, {'group_name': group_name})
            row = result.fetchone()

            # No activity in the window: return an empty-but-successful shape.
            if not row or not row.summary:
                return {
                    "success": True,
                    "data": {
                        "group_name": group_name,
                        "summary": {},
                        "hourly_trend": []
                    }
                }

            return {
                "success": True,
                "data": {
                    "group_name": group_name,
                    "summary": row.summary,
                    "hourly_trend": row.hourly_trend or []
                }
            }
|
|
552
|
+
|
|
553
|
+
@staticmethod
|
|
554
|
+
async def get_stream_backlog(
|
|
555
|
+
data_access,
|
|
556
|
+
namespace: str,
|
|
557
|
+
stream_name: Optional[str] = None,
|
|
558
|
+
hours: int = 24
|
|
559
|
+
) -> Dict[str, Any]:
|
|
560
|
+
"""
|
|
561
|
+
获取Stream积压监控数据
|
|
562
|
+
|
|
563
|
+
Args:
|
|
564
|
+
data_access: 数据访问层实例
|
|
565
|
+
namespace: 命名空间
|
|
566
|
+
stream_name: 可选,指定stream名称
|
|
567
|
+
hours: 查询最近多少小时的数据
|
|
568
|
+
|
|
569
|
+
Returns:
|
|
570
|
+
Stream积压数据
|
|
571
|
+
"""
|
|
572
|
+
# 计算时间范围
|
|
573
|
+
end_time = datetime.now(timezone.utc)
|
|
574
|
+
start_time = end_time - timedelta(hours=hours)
|
|
575
|
+
|
|
576
|
+
async with data_access.AsyncSessionLocal() as session:
|
|
577
|
+
# 构建查询
|
|
578
|
+
if stream_name:
|
|
579
|
+
query = text("""
|
|
580
|
+
SELECT
|
|
581
|
+
stream_name,
|
|
582
|
+
consumer_group,
|
|
583
|
+
last_published_offset,
|
|
584
|
+
last_delivered_offset,
|
|
585
|
+
last_acked_offset,
|
|
586
|
+
pending_count,
|
|
587
|
+
backlog_undelivered,
|
|
588
|
+
backlog_unprocessed,
|
|
589
|
+
created_at
|
|
590
|
+
FROM stream_backlog_monitor
|
|
591
|
+
WHERE namespace = :namespace
|
|
592
|
+
AND stream_name = :stream_name
|
|
593
|
+
AND created_at >= :start_time
|
|
594
|
+
AND created_at <= :end_time
|
|
595
|
+
ORDER BY created_at DESC
|
|
596
|
+
LIMIT 1000
|
|
597
|
+
""")
|
|
598
|
+
params = {
|
|
599
|
+
'namespace': namespace,
|
|
600
|
+
'stream_name': stream_name,
|
|
601
|
+
'start_time': start_time,
|
|
602
|
+
'end_time': end_time
|
|
603
|
+
}
|
|
604
|
+
else:
|
|
605
|
+
# 获取最新的所有stream数据
|
|
606
|
+
query = text("""
|
|
607
|
+
SELECT DISTINCT ON (stream_name, consumer_group)
|
|
608
|
+
stream_name,
|
|
609
|
+
consumer_group,
|
|
610
|
+
last_published_offset,
|
|
611
|
+
last_delivered_offset,
|
|
612
|
+
last_acked_offset,
|
|
613
|
+
pending_count,
|
|
614
|
+
backlog_undelivered,
|
|
615
|
+
backlog_unprocessed,
|
|
616
|
+
created_at
|
|
617
|
+
FROM stream_backlog_monitor
|
|
618
|
+
WHERE namespace = :namespace
|
|
619
|
+
AND created_at >= :start_time
|
|
620
|
+
ORDER BY stream_name, consumer_group, created_at DESC
|
|
621
|
+
""")
|
|
622
|
+
params = {
|
|
623
|
+
'namespace': namespace,
|
|
624
|
+
'start_time': start_time
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
result = await session.execute(query, params)
|
|
628
|
+
rows = result.fetchall()
|
|
629
|
+
|
|
630
|
+
# 格式化数据
|
|
631
|
+
data = []
|
|
632
|
+
for row in rows:
|
|
633
|
+
data.append({
|
|
634
|
+
'stream_name': row.stream_name,
|
|
635
|
+
'consumer_group': row.consumer_group,
|
|
636
|
+
'last_published_offset': row.last_published_offset,
|
|
637
|
+
'last_delivered_offset': row.last_delivered_offset,
|
|
638
|
+
'last_acked_offset': row.last_acked_offset,
|
|
639
|
+
'pending_count': row.pending_count,
|
|
640
|
+
'backlog_undelivered': row.backlog_undelivered,
|
|
641
|
+
'backlog_unprocessed': row.backlog_unprocessed,
|
|
642
|
+
'created_at': row.created_at.isoformat() if row.created_at else None
|
|
643
|
+
})
|
|
644
|
+
|
|
645
|
+
return {
|
|
646
|
+
'success': True,
|
|
647
|
+
'data': data,
|
|
648
|
+
'total': len(data)
|
|
649
|
+
}
|
|
650
|
+
|
|
651
|
+
@staticmethod
|
|
652
|
+
async def get_stream_backlog_summary(data_access, namespace: str) -> Dict[str, Any]:
|
|
653
|
+
"""
|
|
654
|
+
获取Stream积压监控汇总数据
|
|
655
|
+
|
|
656
|
+
Args:
|
|
657
|
+
data_access: 数据访问层实例
|
|
658
|
+
namespace: 命名空间
|
|
659
|
+
|
|
660
|
+
Returns:
|
|
661
|
+
汇总数据
|
|
662
|
+
"""
|
|
663
|
+
async with data_access.AsyncSessionLocal() as session:
|
|
664
|
+
# 获取最新的汇总数据
|
|
665
|
+
query = text("""
|
|
666
|
+
WITH latest_data AS (
|
|
667
|
+
SELECT DISTINCT ON (stream_name, consumer_group)
|
|
668
|
+
stream_name,
|
|
669
|
+
consumer_group,
|
|
670
|
+
backlog_undelivered,
|
|
671
|
+
backlog_unprocessed,
|
|
672
|
+
pending_count
|
|
673
|
+
FROM stream_backlog_monitor
|
|
674
|
+
WHERE namespace = :namespace
|
|
675
|
+
AND created_at >= NOW() - INTERVAL '1 hour'
|
|
676
|
+
ORDER BY stream_name, consumer_group, created_at DESC
|
|
677
|
+
)
|
|
678
|
+
SELECT
|
|
679
|
+
COUNT(DISTINCT stream_name) as total_streams,
|
|
680
|
+
COUNT(DISTINCT consumer_group) as total_groups,
|
|
681
|
+
SUM(backlog_unprocessed) as total_backlog,
|
|
682
|
+
SUM(pending_count) as total_pending,
|
|
683
|
+
MAX(backlog_unprocessed) as max_backlog
|
|
684
|
+
FROM latest_data
|
|
685
|
+
""")
|
|
686
|
+
|
|
687
|
+
result = await session.execute(query, {'namespace': namespace})
|
|
688
|
+
row = result.fetchone()
|
|
689
|
+
|
|
690
|
+
if row:
|
|
691
|
+
return {
|
|
692
|
+
'success': True,
|
|
693
|
+
'data': {
|
|
694
|
+
'total_streams': row.total_streams or 0,
|
|
695
|
+
'total_groups': row.total_groups or 0,
|
|
696
|
+
'total_backlog': row.total_backlog or 0,
|
|
697
|
+
'total_pending': row.total_pending or 0,
|
|
698
|
+
'max_backlog': row.max_backlog or 0
|
|
699
|
+
}
|
|
700
|
+
}
|
|
701
|
+
else:
|
|
702
|
+
return {
|
|
703
|
+
'success': True,
|
|
704
|
+
'data': {
|
|
705
|
+
'total_streams': 0,
|
|
706
|
+
'total_groups': 0,
|
|
707
|
+
'total_backlog': 0,
|
|
708
|
+
'total_pending': 0,
|
|
709
|
+
'max_backlog': 0
|
|
710
|
+
}
|
|
711
|
+
}
|