jettask 0.2.7__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jettask/core/cli.py +152 -0
- jettask/pg_consumer/sql/add_execution_time_field.sql +29 -0
- jettask/pg_consumer/sql/create_new_tables.sql +137 -0
- jettask/pg_consumer/sql/create_tables_v3.sql +175 -0
- jettask/pg_consumer/sql/migrate_to_new_structure.sql +179 -0
- jettask/pg_consumer/sql/modify_time_fields.sql +69 -0
- jettask/webui/frontend/package.json +30 -0
- jettask/webui/frontend/src/App.css +109 -0
- jettask/webui/frontend/src/App.jsx +66 -0
- jettask/webui/frontend/src/components/NamespaceSelector.jsx +166 -0
- jettask/webui/frontend/src/components/QueueBacklogChart.jsx +298 -0
- jettask/webui/frontend/src/components/QueueBacklogTrend.jsx +638 -0
- jettask/webui/frontend/src/components/QueueDetailsTable.css +65 -0
- jettask/webui/frontend/src/components/QueueDetailsTable.jsx +487 -0
- jettask/webui/frontend/src/components/QueueDetailsTableV2.jsx +465 -0
- jettask/webui/frontend/src/components/ScheduledTaskFilter.jsx +423 -0
- jettask/webui/frontend/src/components/TaskFilter.jsx +425 -0
- jettask/webui/frontend/src/components/TimeRangeSelector.css +21 -0
- jettask/webui/frontend/src/components/TimeRangeSelector.jsx +160 -0
- jettask/webui/frontend/src/components/charts/QueueChart.jsx +111 -0
- jettask/webui/frontend/src/components/charts/QueueTrendChart.jsx +115 -0
- jettask/webui/frontend/src/components/charts/WorkerChart.jsx +40 -0
- jettask/webui/frontend/src/components/common/StatsCard.jsx +18 -0
- jettask/webui/frontend/src/components/layout/AppLayout.css +95 -0
- jettask/webui/frontend/src/components/layout/AppLayout.jsx +49 -0
- jettask/webui/frontend/src/components/layout/Header.css +106 -0
- jettask/webui/frontend/src/components/layout/Header.jsx +106 -0
- jettask/webui/frontend/src/components/layout/SideMenu.css +137 -0
- jettask/webui/frontend/src/components/layout/SideMenu.jsx +209 -0
- jettask/webui/frontend/src/components/layout/TabsNav.css +244 -0
- jettask/webui/frontend/src/components/layout/TabsNav.jsx +206 -0
- jettask/webui/frontend/src/components/layout/UserInfo.css +197 -0
- jettask/webui/frontend/src/components/layout/UserInfo.jsx +197 -0
- jettask/webui/frontend/src/contexts/LoadingContext.jsx +27 -0
- jettask/webui/frontend/src/contexts/NamespaceContext.jsx +72 -0
- jettask/webui/frontend/src/contexts/TabsContext.backup.jsx +245 -0
- jettask/webui/frontend/src/index.css +114 -0
- jettask/webui/frontend/src/main.jsx +20 -0
- jettask/webui/frontend/src/pages/Alerts.jsx +684 -0
- jettask/webui/frontend/src/pages/Dashboard/index.css +35 -0
- jettask/webui/frontend/src/pages/Dashboard/index.jsx +281 -0
- jettask/webui/frontend/src/pages/Dashboard.jsx +1330 -0
- jettask/webui/frontend/src/pages/QueueDetail.jsx +1117 -0
- jettask/webui/frontend/src/pages/QueueMonitor.jsx +527 -0
- jettask/webui/frontend/src/pages/Queues.jsx +12 -0
- jettask/webui/frontend/src/pages/ScheduledTasks.jsx +809 -0
- jettask/webui/frontend/src/pages/Settings.jsx +800 -0
- jettask/webui/frontend/src/pages/Workers.jsx +12 -0
- jettask/webui/frontend/src/services/api.js +114 -0
- jettask/webui/frontend/src/services/queueTrend.js +152 -0
- jettask/webui/frontend/src/utils/suppressWarnings.js +22 -0
- jettask/webui/frontend/src/utils/userPreferences.js +154 -0
- jettask/webui/frontend/vite.config.js +26 -0
- {jettask-0.2.7.dist-info → jettask-0.2.8.dist-info}/METADATA +1 -1
- {jettask-0.2.7.dist-info → jettask-0.2.8.dist-info}/RECORD +59 -14
- jettask/webui/static/dist/assets/index-7129cfe1.css +0 -1
- jettask/webui/static/dist/assets/index-8d1935cc.js +0 -774
- jettask/webui/static/dist/index.html +0 -15
- jettask/webui/static/index.html +0 -1734
- jettask/webui/static/queue.html +0 -981
- jettask/webui/static/queues.html +0 -549
- jettask/webui/static/workers.html +0 -734
- {jettask-0.2.7.dist-info → jettask-0.2.8.dist-info}/WHEEL +0 -0
- {jettask-0.2.7.dist-info → jettask-0.2.8.dist-info}/entry_points.txt +0 -0
- {jettask-0.2.7.dist-info → jettask-0.2.8.dist-info}/licenses/LICENSE +0 -0
- {jettask-0.2.7.dist-info → jettask-0.2.8.dist-info}/top_level.txt +0 -0
jettask/core/cli.py
CHANGED
@@ -474,6 +474,158 @@ def scheduler(task_center, interval, batch_size, check_interval, debug):
     except KeyboardInterrupt:
         click.echo("\nShutdown complete")
 
+@cli.command()
+@click.option('--port', default=5173, help='Frontend dev server port')
+@click.option('--host', default='0.0.0.0', help='Frontend dev server bind address')
+@click.option('--auto-install', is_flag=True, default=True, help='Automatically install missing dependencies')
+@click.option('--force-install', is_flag=True, help='Force a reinstall of the dependencies')
+@click.option('--build', is_flag=True, help='Build the production bundle instead of starting the dev server')
+def frontend(port, host, auto_install, force_install, build):
+    """Start the WebUI frontend.
+
+    Detects the environment and starts the frontend dev server:
+    1. Check whether Node.js and npm are installed
+    2. Automatically install any missing dependencies
+    3. Start the dev server or build the production bundle
+
+    Examples:
+        # Start the dev server (default)
+        jettask frontend
+
+        # Use a specific port
+        jettask frontend --port 3000
+
+        # Build the production bundle
+        jettask frontend --build
+
+        # Force a reinstall of the dependencies
+        jettask frontend --force-install
+    """
+    import subprocess
+    import shutil
+    from pathlib import Path
+
+    # Resolve the frontend directory
+    frontend_dir = Path(__file__).parent.parent / "webui" / "frontend"
+    if not frontend_dir.exists():
+        click.echo(f"Error: frontend directory does not exist: {frontend_dir}", err=True)
+        sys.exit(1)
+
+    # Check whether Node.js is installed
+    node_cmd = shutil.which('node')
+    if not node_cmd:
+        click.echo("Error: Node.js environment not detected", err=True)
+        click.echo("\nPlease install Node.js first:")
+        click.echo("  - Ubuntu/Debian: sudo apt-get install nodejs npm")
+        click.echo("  - macOS: brew install node")
+        click.echo("  - Windows: download the installer from https://nodejs.org")
+        click.echo("  - or via nvm: curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash")
+        sys.exit(1)
+
+    # Check whether npm is installed
+    npm_cmd = shutil.which('npm')
+    if not npm_cmd:
+        click.echo("Error: npm not detected", err=True)
+        click.echo("\nPlease install npm:")
+        click.echo("  - Ubuntu/Debian: sudo apt-get install npm")
+        click.echo("  - or reinstall Node.js (npm is bundled with it)")
+        sys.exit(1)
+
+    # Show version information
+    try:
+        node_version = subprocess.check_output([node_cmd, '--version'], text=True).strip()
+        npm_version = subprocess.check_output([npm_cmd, '--version'], text=True).strip()
+        click.echo(f"Detected Node.js {node_version}, npm {npm_version}")
+    except subprocess.CalledProcessError:
+        pass
+
+    # Switch to the frontend directory
+    os.chdir(frontend_dir)
+    click.echo(f"Switched to frontend directory: {frontend_dir}")
+
+    # Check that package.json exists
+    package_json = frontend_dir / "package.json"
+    if not package_json.exists():
+        click.echo("Error: package.json not found", err=True)
+        sys.exit(1)
+
+    # Check whether node_modules exists
+    node_modules = frontend_dir / "node_modules"
+    need_install = not node_modules.exists() or force_install
+
+    if need_install and auto_install:
+        click.echo("\nInstalling dependencies...")
+        try:
+            # Remove the old node_modules (when forcing a reinstall)
+            if force_install and node_modules.exists():
+                click.echo("Removing old node_modules...")
+                shutil.rmtree(node_modules)
+
+            # Remove package-lock.json (in case it is broken)
+            lock_file = frontend_dir / "package-lock.json"
+            if force_install and lock_file.exists():
+                lock_file.unlink()
+
+            # Run npm install
+            subprocess.run([npm_cmd, 'install'], check=True)
+            click.echo("✓ Dependencies installed")
+        except subprocess.CalledProcessError as e:
+            click.echo(f"Error: dependency installation failed - {e}", err=True)
+            click.echo("\nYou can try to:")
+            click.echo("  1. Delete the node_modules directory manually and retry")
+            click.echo("  2. Re-run with --force-install to force a reinstall")
+            click.echo("  3. Check your network connection and npm registry settings")
+            sys.exit(1)
+    elif need_install and not auto_install:
+        click.echo("Warning: node_modules not found, please run 'npm install' to install the dependencies")
+        if not click.confirm("Install the dependencies now?"):
+            sys.exit(1)
+
+    # Build or start
+    try:
+        if build:
+            # Build the production bundle
+            click.echo("\nBuilding production bundle...")
+            subprocess.run([npm_cmd, 'run', 'build'], check=True)
+
+            # Report the build result
+            dist_dir = frontend_dir.parent / "static" / "dist"
+            if dist_dir.exists():
+                click.echo(f"\n✓ Build complete! Output directory: {dist_dir}")
+                # Collect file statistics
+                files = list(dist_dir.rglob('*'))
+                file_count = len([f for f in files if f.is_file()])
+                total_size = sum(f.stat().st_size for f in files if f.is_file())
+                click.echo(f"  - Files: {file_count}")
+                click.echo(f"  - Total size: {total_size / 1024 / 1024:.2f} MB")
+            else:
+                click.echo("Warning: build finished but the output directory was not found")
+        else:
+            # Start the dev server
+            click.echo(f"\nStarting the frontend dev server...")
+            click.echo(f"  - Address: http://{host}:{port}")
+            click.echo(f"  - Local access: http://localhost:{port}")
+            click.echo("\nPress Ctrl+C to stop the server\n")
+
+            # Assemble the launch command
+            cmd = [npm_cmd, 'run', 'dev']
+            if host != 'localhost':
+                cmd.extend(['--', '--host', host])
+            if port != 5173:
+                cmd.extend(['--port', str(port)])
+
+            # Launch the dev server
+            process = subprocess.Popen(cmd)
+            process.wait()
+    except subprocess.CalledProcessError as e:
+        click.echo(f"Error: command failed - {e}", err=True)
+        sys.exit(1)
+    except KeyboardInterrupt:
+        click.echo("\nStopping frontend server")
+        if 'process' in locals():
+            process.terminate()
+            process.wait()
+
 def main():
     """Main entry point"""
     try:
jettask/pg_consumer/sql/add_execution_time_field.sql
ADDED
@@ -0,0 +1,29 @@
+-- Add the execution_time_ms column to the task_runs table
+-- execution_time_ms: actual execution time (ms), from the start of execution to completion
+-- duration_ms: total elapsed time (ms), from task creation to completion
+
+-- Check whether the column exists; add it if it does not
+DO $$
+BEGIN
+    IF NOT EXISTS (
+        SELECT 1
+        FROM information_schema.columns
+        WHERE table_name = 'task_runs'
+        AND column_name = 'execution_time_ms'
+    ) THEN
+        ALTER TABLE task_runs
+        ADD COLUMN execution_time_ms BIGINT;
+
+        COMMENT ON COLUMN task_runs.execution_time_ms IS 'Actual execution time (ms), from the start of execution to completion';
+
+        -- Backfill execution_time_ms for existing rows (when start_time and end_time are present)
+        UPDATE task_runs
+        SET execution_time_ms = EXTRACT(EPOCH FROM (end_time - start_time)) * 1000
+        WHERE start_time IS NOT NULL
+        AND end_time IS NOT NULL
+        AND execution_time_ms IS NULL;
+    END IF;
+END $$;
+
+-- Make sure the comment on duration_ms is correct
+COMMENT ON COLUMN task_runs.duration_ms IS 'Total elapsed time (ms), from task creation to completion';
jettask/pg_consumer/sql/create_new_tables.sql
ADDED
@@ -0,0 +1,137 @@
+-- New task table schema
+-- Supports task management across multiple consumer groups
+
+-- 1. Task base information table (tasks)
+-- Stores task metadata; each task has exactly one row
+CREATE TABLE IF NOT EXISTS tasks (
+    id BIGSERIAL PRIMARY KEY,                              -- internal primary key
+    stream_id TEXT UNIQUE NOT NULL,                        -- Redis Stream message id (e.g. 1757039473571-0)
+    queue TEXT NOT NULL,                                   -- queue name
+    namespace TEXT,                                        -- namespace (multi-tenant support)
+    scheduled_task_id TEXT,                                -- scheduled task ID
+    payload JSONB NOT NULL,                                -- task parameters (the full event_data)
+    priority INT DEFAULT 0,                                -- task priority
+    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,      -- task creation time
+    updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,      -- update time
+    source TEXT,                                           -- task source (e.g. api/scheduler/manual)
+    metadata JSONB DEFAULT '{}'::jsonb                     -- extra metadata
+);
+
+-- Unique index on stream_id
+CREATE UNIQUE INDEX IF NOT EXISTS idx_tasks_stream_id ON tasks(stream_id);
+
+-- Indexes on queue and related columns for lookups
+CREATE INDEX IF NOT EXISTS idx_tasks_queue ON tasks(queue);
+CREATE INDEX IF NOT EXISTS idx_tasks_namespace ON tasks(namespace);
+CREATE INDEX IF NOT EXISTS idx_tasks_created_at ON tasks(created_at);
+CREATE INDEX IF NOT EXISTS idx_tasks_scheduled_task_id ON tasks(scheduled_task_id);
+
+-- Composite index on queue and namespace to speed up queries
+CREATE INDEX IF NOT EXISTS idx_tasks_queue_namespace ON tasks(queue, namespace);
+
+-- 2. Task run table (task_runs)
+-- Records each consumer group's execution of a task
+CREATE TABLE IF NOT EXISTS task_runs (
+    id BIGSERIAL PRIMARY KEY,                              -- internal primary key
+    task_id BIGINT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,  -- referenced task ID
+    stream_id TEXT NOT NULL,                               -- stream_id stored redundantly for lookups
+    task_name TEXT NOT NULL,                               -- task name
+    consumer_group TEXT NOT NULL,                          -- consumer group name
+    consumer_name TEXT,                                    -- concrete consumer instance name
+    worker_id TEXT,                                        -- worker ID
+    status TEXT NOT NULL DEFAULT 'pending',                -- execution status (pending/running/success/failed/retrying/timeout/skipped)
+    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,      -- creation time
+    start_time TIMESTAMPTZ,                                -- execution start time
+    end_time TIMESTAMPTZ,                                  -- end time
+    duration DOUBLE PRECISION,                             -- execution time (seconds), computed by the application
+    retry_count INT DEFAULT 0,                             -- retry count
+    max_retries INT DEFAULT 3,                             -- maximum number of retries
+    error_message TEXT,                                    -- error message
+    error_details JSONB,                                   -- detailed error info (stack trace, etc.)
+    result JSONB,                                          -- execution result
+    logs TEXT[],                                           -- execution logs
+    updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP       -- last update time
+);
+
+-- Index on task_id
+CREATE INDEX IF NOT EXISTS idx_task_runs_task_id ON task_runs(task_id);
+
+-- Index on stream_id
+CREATE INDEX IF NOT EXISTS idx_task_runs_stream_id ON task_runs(stream_id);
+
+-- Index on consumer_group
+CREATE INDEX IF NOT EXISTS idx_task_runs_consumer_group ON task_runs(consumer_group);
+
+-- Index on status
+CREATE INDEX IF NOT EXISTS idx_task_runs_status ON task_runs(status);
+
+-- Composite indexes to speed up queries
+CREATE INDEX IF NOT EXISTS idx_task_runs_task_group ON task_runs(task_id, consumer_group);
+CREATE INDEX IF NOT EXISTS idx_task_runs_group_status ON task_runs(consumer_group, status);
+CREATE INDEX IF NOT EXISTS idx_task_runs_stream_group ON task_runs(stream_id, consumer_group);
+
+-- Unique constraint so that a task has only one run record per consumer group
+CREATE UNIQUE INDEX IF NOT EXISTS idx_task_runs_unique_task_group ON task_runs(task_id, consumer_group);
+
+-- updated_at trigger
+CREATE OR REPLACE FUNCTION update_updated_at_column()
+RETURNS TRIGGER AS $$
+BEGIN
+    NEW.updated_at = CURRENT_TIMESTAMP;
+    RETURN NEW;
+END;
+$$ language 'plpgsql';
+
+-- Trigger on task_runs
+DROP TRIGGER IF EXISTS update_task_runs_updated_at ON task_runs;
+CREATE TRIGGER update_task_runs_updated_at
+    BEFORE UPDATE ON task_runs
+    FOR EACH ROW
+    EXECUTE FUNCTION update_updated_at_column();
+
+-- 3. Views for convenient querying
+-- Task execution overview view
+CREATE OR REPLACE VIEW task_execution_overview AS
+SELECT
+    t.id,
+    t.stream_id,
+    t.queue,
+    t.namespace,
+    t.scheduled_task_id,
+    t.created_at,
+    COUNT(DISTINCT tr.consumer_group) as consumer_group_count,
+    COUNT(tr.id) as total_runs,
+    COUNT(CASE WHEN tr.status = 'success' THEN 1 END) as success_count,
+    COUNT(CASE WHEN tr.status = 'failed' THEN 1 END) as failed_count,
+    COUNT(CASE WHEN tr.status = 'running' THEN 1 END) as running_count,
+    AVG(tr.duration * 1000) as avg_duration_ms,  -- converted to milliseconds
+    MAX(tr.end_time) as last_execution_time
+FROM tasks t
+LEFT JOIN task_runs tr ON t.id = tr.task_id
+GROUP BY t.id;
+
+-- Consumer group statistics view
+CREATE OR REPLACE VIEW consumer_group_stats AS
+SELECT
+    tr.consumer_group,
+    tr.task_name,
+    t.queue,
+    t.namespace,
+    COUNT(DISTINCT tr.task_id) as total_tasks,
+    COUNT(CASE WHEN tr.status = 'success' THEN 1 END) as success_count,
+    COUNT(CASE WHEN tr.status = 'failed' THEN 1 END) as failed_count,
+    COUNT(CASE WHEN tr.status = 'running' THEN 1 END) as running_count,
+    AVG(tr.duration * 1000) as avg_duration_ms,  -- converted to milliseconds
+    SUM(tr.retry_count) as total_retries,
+    MAX(tr.end_time) as last_activity
+FROM task_runs tr
+JOIN tasks t ON tr.task_id = t.id
+GROUP BY tr.consumer_group, tr.task_name, t.queue, t.namespace;
+
+-- Comments
+COMMENT ON TABLE tasks IS 'Task base information table; stores task metadata';
+COMMENT ON TABLE task_runs IS 'Task run table; records each consumer group''s execution of a task';
+COMMENT ON COLUMN tasks.stream_id IS 'Redis Stream message ID, used for idempotency';
+COMMENT ON COLUMN tasks.payload IS 'Task parameters; stores the full event_data';
+COMMENT ON COLUMN task_runs.consumer_group IS 'Consumer group name, e.g. jettask:QUEUE:queue_name:task_name';
+COMMENT ON COLUMN task_runs.duration IS 'Execution time (seconds), computed automatically';
jettask/pg_consumer/sql/create_tables_v3.sql
ADDED
@@ -0,0 +1,175 @@
+-- Optimized task table schema, V3
+-- Columns adjusted to the actual requirements
+
+-- 1. Task base information table (tasks)
+-- Stores task metadata; each task has exactly one row
+CREATE TABLE IF NOT EXISTS tasks (
+    id BIGSERIAL PRIMARY KEY,                              -- internal primary key
+    stream_id TEXT UNIQUE NOT NULL,                        -- Redis Stream message id (e.g. 1757039473571-0)
+    queue TEXT NOT NULL,                                   -- queue name
+    namespace TEXT DEFAULT 'default',                      -- namespace
+    scheduled_task_id TEXT,                                -- scheduled task ID (when produced by the scheduler)
+    payload JSONB NOT NULL,                                -- task parameters (the full event_data)
+    priority INT DEFAULT 0,                                -- task priority
+    source TEXT,                                           -- task source (e.g. api/scheduler/manual)
+    metadata JSONB DEFAULT '{}'::jsonb,                    -- extra metadata
+    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP       -- task creation time
+);
+
+-- Unique index on stream_id
+CREATE UNIQUE INDEX IF NOT EXISTS idx_tasks_stream_id ON tasks(stream_id);
+
+-- Index on queue for lookups
+CREATE INDEX IF NOT EXISTS idx_tasks_queue ON tasks(queue);
+
+-- Index on namespace
+CREATE INDEX IF NOT EXISTS idx_tasks_namespace ON tasks(namespace);
+
+-- Index on scheduled_task_id
+CREATE INDEX IF NOT EXISTS idx_tasks_scheduled_task_id ON tasks(scheduled_task_id);
+
+-- Index on created_at
+CREATE INDEX IF NOT EXISTS idx_tasks_created_at ON tasks(created_at);
+
+-- Composite index on namespace and queue
+CREATE INDEX IF NOT EXISTS idx_tasks_namespace_queue ON tasks(namespace, queue);
+
+-- Composite index on namespace and scheduled_task_id
+CREATE INDEX IF NOT EXISTS idx_tasks_namespace_scheduled ON tasks(namespace, scheduled_task_id) WHERE scheduled_task_id IS NOT NULL;
+
+-- 2. Task run table (task_runs)
+-- Records each consumer group's execution of a task
+CREATE TABLE IF NOT EXISTS task_runs (
+    id BIGSERIAL PRIMARY KEY,                              -- internal primary key
+    task_id BIGINT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,  -- referenced task ID
+    stream_id TEXT NOT NULL,                               -- stream_id stored redundantly for lookups
+    task_name TEXT NOT NULL,                               -- task name (the concrete task function executed)
+    consumer_group TEXT NOT NULL,                          -- consumer group name
+    consumer_name TEXT,                                    -- concrete consumer instance name
+    worker_id TEXT,                                        -- worker ID
+    status TEXT NOT NULL DEFAULT 'pending',                -- execution status (pending/running/success/failed/retrying/timeout/skipped)
+    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,      -- record creation time
+    start_time TIMESTAMPTZ,                                -- execution start time
+    end_time TIMESTAMPTZ,                                  -- end time
+    duration_ms BIGINT,                                    -- execution time (milliseconds)
+    retry_count INT DEFAULT 0,                             -- retry count
+    max_retries INT DEFAULT 3,                             -- maximum number of retries
+    error_message TEXT,                                    -- error message
+    error_details JSONB,                                   -- detailed error info (stack trace, etc.)
+    result JSONB,                                          -- execution result
+    logs TEXT[],                                           -- execution logs
+    updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP       -- last update time
+);
+
+-- Index on task_id
+CREATE INDEX IF NOT EXISTS idx_task_runs_task_id ON task_runs(task_id);
+
+-- Index on stream_id
+CREATE INDEX IF NOT EXISTS idx_task_runs_stream_id ON task_runs(stream_id);
+
+-- Index on task_name
+CREATE INDEX IF NOT EXISTS idx_task_runs_task_name ON task_runs(task_name);
+
+-- Index on consumer_group
+CREATE INDEX IF NOT EXISTS idx_task_runs_consumer_group ON task_runs(consumer_group);
+
+-- Index on status
+CREATE INDEX IF NOT EXISTS idx_task_runs_status ON task_runs(status);
+
+-- Index on created_at
+CREATE INDEX IF NOT EXISTS idx_task_runs_created_at ON task_runs(created_at);
+
+-- Composite indexes to speed up queries
+CREATE INDEX IF NOT EXISTS idx_task_runs_task_group ON task_runs(task_id, consumer_group);
+CREATE INDEX IF NOT EXISTS idx_task_runs_group_status ON task_runs(consumer_group, status);
+CREATE INDEX IF NOT EXISTS idx_task_runs_stream_group ON task_runs(stream_id, consumer_group);
+
+-- Unique constraint so that a task has only one run record per consumer group
+CREATE UNIQUE INDEX IF NOT EXISTS idx_task_runs_unique_task_group ON task_runs(task_id, consumer_group);
+
+-- updated_at trigger
+CREATE OR REPLACE FUNCTION update_updated_at_column()
+RETURNS TRIGGER AS $$
+BEGIN
+    NEW.updated_at = CURRENT_TIMESTAMP;
+    RETURN NEW;
+END;
+$$ language 'plpgsql';
+
+-- Trigger on task_runs
+DROP TRIGGER IF EXISTS update_task_runs_updated_at ON task_runs;
+CREATE TRIGGER update_task_runs_updated_at
+    BEFORE UPDATE ON task_runs
+    FOR EACH ROW
+    EXECUTE FUNCTION update_updated_at_column();
+
+-- 3. Views for convenient querying
+-- Task execution overview view
+CREATE OR REPLACE VIEW task_execution_overview AS
+SELECT
+    t.id,
+    t.stream_id,
+    t.queue,
+    t.namespace,
+    t.scheduled_task_id,
+    t.created_at,
+    COUNT(DISTINCT tr.consumer_group) as consumer_group_count,
+    COUNT(tr.id) as total_runs,
+    COUNT(CASE WHEN tr.status = 'success' THEN 1 END) as success_count,
+    COUNT(CASE WHEN tr.status = 'failed' THEN 1 END) as failed_count,
+    COUNT(CASE WHEN tr.status = 'running' THEN 1 END) as running_count,
+    AVG(tr.duration_ms) as avg_duration_ms,
+    MAX(tr.end_time) as last_execution_time
+FROM tasks t
+LEFT JOIN task_runs tr ON t.id = tr.task_id
+GROUP BY t.id;
+
+-- Consumer group statistics view
+CREATE OR REPLACE VIEW consumer_group_stats AS
+SELECT
+    tr.consumer_group,
+    tr.task_name,
+    t.queue,
+    t.namespace,
+    COUNT(DISTINCT tr.task_id) as total_tasks,
+    COUNT(CASE WHEN tr.status = 'success' THEN 1 END) as success_count,
+    COUNT(CASE WHEN tr.status = 'failed' THEN 1 END) as failed_count,
+    COUNT(CASE WHEN tr.status = 'running' THEN 1 END) as running_count,
+    AVG(tr.duration_ms) as avg_duration_ms,
+    SUM(tr.retry_count) as total_retries,
+    MAX(tr.end_time) as last_activity
+FROM task_runs tr
+JOIN tasks t ON tr.task_id = t.id
+GROUP BY tr.consumer_group, tr.task_name, t.queue, t.namespace;
+
+-- Per-namespace task statistics view
+CREATE OR REPLACE VIEW namespace_task_stats AS
+SELECT
+    t.namespace,
+    t.queue,
+    COUNT(DISTINCT t.id) as total_tasks,
+    COUNT(DISTINCT t.scheduled_task_id) as scheduled_tasks,
+    COUNT(DISTINCT tr.task_name) as unique_task_names,
+    COUNT(DISTINCT tr.consumer_group) as consumer_groups,
+    COUNT(tr.id) as total_runs,
+    COUNT(CASE WHEN tr.status = 'success' THEN 1 END) as success_runs,
+    COUNT(CASE WHEN tr.status = 'failed' THEN 1 END) as failed_runs,
+    AVG(tr.duration_ms) as avg_duration_ms,
+    MIN(t.created_at) as first_task_at,
+    MAX(t.created_at) as last_task_at
+FROM tasks t
+LEFT JOIN task_runs tr ON t.id = tr.task_id
+GROUP BY t.namespace, t.queue;
+
+-- Comments
+COMMENT ON TABLE tasks IS 'Task base information table; stores task metadata';
+COMMENT ON TABLE task_runs IS 'Task run table; records each consumer group''s execution of a task';
+COMMENT ON COLUMN tasks.stream_id IS 'Redis Stream message ID, used for idempotency';
+COMMENT ON COLUMN tasks.namespace IS 'Namespace, used for multi-tenant isolation';
+COMMENT ON COLUMN tasks.scheduled_task_id IS 'Scheduled task ID; indicates whether the task was produced by the scheduler';
+COMMENT ON COLUMN tasks.payload IS 'Task parameters; stores the full event_data';
+COMMENT ON COLUMN task_runs.task_name IS 'Name of the concrete task function that was executed';
+COMMENT ON COLUMN task_runs.consumer_group IS 'Consumer group name, e.g. jettask:QUEUE:queue_name:task_name';
+COMMENT ON COLUMN task_runs.created_at IS 'Record creation time; marks when the task was picked up by a consumer';
+COMMENT ON COLUMN task_runs.start_time IS 'Actual execution start time; may differ from created_at';
+COMMENT ON COLUMN task_runs.duration_ms IS 'Execution time (milliseconds), computed by the application';
jettask/pg_consumer/sql/migrate_to_new_structure.sql
ADDED
@@ -0,0 +1,179 @@
+-- Data migration script
+-- Migrates from the old single-table structure to the new two-table structure
+
+BEGIN;
+
+-- 1. Back up the original table first (if it exists)
+DO $$
+BEGIN
+    IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'tasks') THEN
+        -- If a tasks table already exists, rename it to a backup table
+        IF NOT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'tasks_backup') THEN
+            ALTER TABLE tasks RENAME TO tasks_backup;
+        ELSE
+            -- If the backup table already exists, create a timestamped backup
+            EXECUTE format('ALTER TABLE tasks RENAME TO tasks_backup_%s',
+                          to_char(now(), 'YYYYMMDD_HH24MISS'));
+        END IF;
+    END IF;
+END $$;
+
+-- 2. Run the SQL that creates the new tables (see create_new_tables.sql)
+-- Note: in practice, run create_new_tables.sql first
+
+-- 3. If there is old data, migrate it
+DO $$
+BEGIN
+    -- Check whether a backup table exists
+    IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'tasks_backup') THEN
+        -- Migrate task base information into the new tasks table
+        INSERT INTO tasks (
+            stream_id,
+            queue,
+            task_name,
+            task_type,
+            payload,
+            priority,
+            created_at,
+            scheduled_at,
+            status,
+            source,
+            metadata
+        )
+        SELECT
+            COALESCE(task_id, 'unknown-' || id::text) as stream_id,  -- use task_id as stream_id
+            COALESCE(queue_name, 'default') as queue,
+            COALESCE(task_name, 'unknown') as task_name,
+            task_type,
+            COALESCE(task_data, '{}'::jsonb) as payload,
+            COALESCE(priority, 0) as priority,
+            created_at,
+            trigger_time as scheduled_at,
+            CASE
+                WHEN status IN ('pending', 'running', 'completed', 'failed') THEN status
+                WHEN status = 'success' THEN 'completed'
+                ELSE 'pending'
+            END as status,
+            'migration' as source,
+            jsonb_build_object(
+                'migrated_at', now(),
+                'original_id', id,
+                'original_status', status
+            ) as metadata
+        FROM tasks_backup
+        ON CONFLICT (stream_id) DO NOTHING;  -- avoid duplicates
+
+        -- Migrate execution records into the task_runs table
+        -- The old table is a single-table structure, so create one run record per task
+        INSERT INTO task_runs (
+            task_id,
+            stream_id,
+            consumer_group,
+            consumer_name,
+            worker_id,
+            status,
+            start_time,
+            end_time,
+            retry_count,
+            error_message,
+            result
+        )
+        SELECT
+            t.id as task_id,
+            t.stream_id,
+            'default_group' as consumer_group,  -- default consumer group
+            tb.consumer as consumer_name,
+            tb.consumer as worker_id,
+            CASE
+                WHEN tb.status = 'success' THEN 'success'
+                WHEN tb.status IN ('failed', 'error') THEN 'failed'
+                WHEN tb.status = 'running' THEN 'running'
+                WHEN tb.status = 'timeout' THEN 'timeout'
+                ELSE 'pending'
+            END as status,
+            tb.started_at as start_time,
+            tb.completed_at as end_time,
+            COALESCE(tb.retry_count, 0) as retry_count,
+            tb.error as error_message,
+            tb.result
+        FROM tasks_backup tb
+        JOIN tasks t ON t.stream_id = COALESCE(tb.task_id, 'unknown-' || tb.id::text)
+        WHERE tb.status IS NOT NULL;
+
+        RAISE NOTICE 'Data migration completed successfully';
+    ELSE
+        RAISE NOTICE 'No backup table found, skipping data migration';
+    END IF;
+END $$;
+
+-- 4. Create a compatibility view (optional)
+-- If code still depends on the old table structure, this view provides compatibility
+CREATE OR REPLACE VIEW tasks_legacy AS
+SELECT
+    t.id,
+    t.stream_id as task_id,
+    t.queue as queue_name,
+    t.task_name,
+    t.task_type,
+    t.payload as task_data,
+    t.priority,
+    tr.consumer_name as consumer,
+    tr.status,
+    tr.start_time as started_at,
+    tr.end_time as completed_at,
+    tr.retry_count,
+    tr.error_message as error,
+    tr.result,
+    t.created_at,
+    tr.updated_at,
+    t.scheduled_at as trigger_time
+FROM tasks t
+LEFT JOIN task_runs tr ON t.id = tr.task_id
+    AND tr.consumer_group = 'default_group';  -- compatibility with old code: show only the default group
+
+-- 5. Update the sequences (if needed)
+DO $$
+DECLARE
+    max_id BIGINT;
+BEGIN
+    -- Get the maximum ID
+    SELECT COALESCE(MAX(id), 0) INTO max_id FROM tasks;
+    -- Update the sequence
+    IF max_id > 0 THEN
+        EXECUTE format('ALTER SEQUENCE tasks_id_seq RESTART WITH %s', max_id + 1);
+    END IF;
+
+    SELECT COALESCE(MAX(id), 0) INTO max_id FROM task_runs;
+    IF max_id > 0 THEN
+        EXECUTE format('ALTER SEQUENCE task_runs_id_seq RESTART WITH %s', max_id + 1);
+    END IF;
+END $$;
+
+COMMIT;
+
+-- 6. Verify the migration results
+SELECT
+    'Tasks Table' as table_name,
+    COUNT(*) as row_count
+FROM tasks
+UNION ALL
+SELECT
+    'Task Runs Table' as table_name,
+    COUNT(*) as row_count
+FROM task_runs
+UNION ALL
+SELECT
+    'Original Backup Table' as table_name,
+    COUNT(*) as row_count
+FROM tasks_backup
+WHERE EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'tasks_backup');
+
+-- Show migration statistics
+SELECT
+    'Migration Summary' as info,
+    jsonb_build_object(
+        'total_tasks', (SELECT COUNT(*) FROM tasks),
+        'total_runs', (SELECT COUNT(*) FROM task_runs),
+        'unique_consumer_groups', (SELECT COUNT(DISTINCT consumer_group) FROM task_runs),
+        'migration_time', now()
+    ) as details;