@nexusgpu/repterm-plugin-kubectl 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +277 -0
- package/dist/index.d.ts +314 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +544 -0
- package/dist/matchers.d.ts +113 -0
- package/dist/matchers.d.ts.map +1 -0
- package/dist/matchers.js +527 -0
- package/dist/plugin-kubectl/examples/00-simple-demo.d.ts +10 -0
- package/dist/plugin-kubectl/examples/00-simple-demo.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/00-simple-demo.js +51 -0
- package/dist/plugin-kubectl/examples/01-basic-kubectl.d.ts +13 -0
- package/dist/plugin-kubectl/examples/01-basic-kubectl.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/01-basic-kubectl.js +86 -0
- package/dist/plugin-kubectl/examples/02-debugging.d.ts +13 -0
- package/dist/plugin-kubectl/examples/02-debugging.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/02-debugging.js +80 -0
- package/dist/plugin-kubectl/examples/03-resource-management.d.ts +13 -0
- package/dist/plugin-kubectl/examples/03-resource-management.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/03-resource-management.js +134 -0
- package/dist/plugin-kubectl/examples/04-rollout.d.ts +13 -0
- package/dist/plugin-kubectl/examples/04-rollout.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/04-rollout.js +122 -0
- package/dist/plugin-kubectl/examples/05-matchers.d.ts +15 -0
- package/dist/plugin-kubectl/examples/05-matchers.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/05-matchers.js +138 -0
- package/dist/plugin-kubectl/examples/06-advanced.d.ts +14 -0
- package/dist/plugin-kubectl/examples/06-advanced.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/06-advanced.js +140 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/00-prerequisites.d.ts +14 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/00-prerequisites.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/00-prerequisites.js +66 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.d.ts +14 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.js +145 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.d.ts +13 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.js +123 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/03-insufficient.d.ts +17 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/03-insufficient.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/03-insufficient.js +96 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/04-release.d.ts +13 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/04-release.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/04-release.js +117 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/05-multi-workload-shared-gpu.d.ts +14 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/05-multi-workload-shared-gpu.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/05-multi-workload-shared-gpu.js +145 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/06-workload-resource-resize.d.ts +14 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/06-workload-resource-resize.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/06-workload-resource-resize.js +235 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/07-workload-worker-pod-generation.d.ts +15 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/07-workload-worker-pod-generation.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/07-workload-worker-pod-generation.js +146 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/08-workload-replicas-scale.d.ts +13 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/08-workload-replicas-scale.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/08-workload-replicas-scale.js +141 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/09-gpu-remote-invocation.d.ts +15 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/09-gpu-remote-invocation.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/09-gpu-remote-invocation.js +256 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/_config.d.ts +71 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/_config.d.ts.map +1 -0
- package/dist/plugin-kubectl/examples/tensor-fusion/_config.js +159 -0
- package/dist/plugin-kubectl/src/index.d.ts +314 -0
- package/dist/plugin-kubectl/src/index.d.ts.map +1 -0
- package/dist/plugin-kubectl/src/index.js +545 -0
- package/dist/plugin-kubectl/src/matchers.d.ts +113 -0
- package/dist/plugin-kubectl/src/matchers.d.ts.map +1 -0
- package/dist/plugin-kubectl/src/matchers.js +527 -0
- package/dist/plugin-kubectl/src/result.d.ts +80 -0
- package/dist/plugin-kubectl/src/result.d.ts.map +1 -0
- package/dist/plugin-kubectl/src/result.js +134 -0
- package/dist/repterm/src/api/describe.d.ts +18 -0
- package/dist/repterm/src/api/describe.d.ts.map +1 -0
- package/dist/repterm/src/api/describe.js +32 -0
- package/dist/repterm/src/api/expect.d.ts +43 -0
- package/dist/repterm/src/api/expect.d.ts.map +1 -0
- package/dist/repterm/src/api/expect.js +166 -0
- package/dist/repterm/src/api/hooks.d.ts +178 -0
- package/dist/repterm/src/api/hooks.d.ts.map +1 -0
- package/dist/repterm/src/api/hooks.js +230 -0
- package/dist/repterm/src/api/steps.d.ts +45 -0
- package/dist/repterm/src/api/steps.d.ts.map +1 -0
- package/dist/repterm/src/api/steps.js +105 -0
- package/dist/repterm/src/api/test.d.ts +101 -0
- package/dist/repterm/src/api/test.d.ts.map +1 -0
- package/dist/repterm/src/api/test.js +206 -0
- package/dist/repterm/src/index.d.ts +15 -0
- package/dist/repterm/src/index.d.ts.map +1 -0
- package/dist/repterm/src/index.js +23 -0
- package/dist/repterm/src/plugin/index.d.ts +47 -0
- package/dist/repterm/src/plugin/index.d.ts.map +1 -0
- package/dist/repterm/src/plugin/index.js +85 -0
- package/dist/repterm/src/plugin/withPlugins.d.ts +71 -0
- package/dist/repterm/src/plugin/withPlugins.d.ts.map +1 -0
- package/dist/repterm/src/plugin/withPlugins.js +100 -0
- package/dist/repterm/src/runner/models.d.ts +261 -0
- package/dist/repterm/src/runner/models.d.ts.map +1 -0
- package/dist/repterm/src/runner/models.js +4 -0
- package/dist/result.d.ts +80 -0
- package/dist/result.d.ts.map +1 -0
- package/dist/result.js +134 -0
- package/package.json +38 -0
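
Every example file in this diff wires the plugin the same way before declaring tests. A minimal sketch of that setup, distilled from 06-advanced.js below (the relative import paths are the ones the package's own examples use from inside dist/, not what an external consumer would write):

// Register the kubectl plugin so every test receives ctx.plugins.kubectl.
import { describe, defineConfig, createTestWithPlugins } from '../../repterm/src/index.js';
import { kubectlPlugin } from '../src/index.js';

const config = defineConfig({
    plugins: [kubectlPlugin({ namespace: 'default' })],
});
// createTestWithPlugins returns a `test` function whose context carries the configured plugins.
const test = createTestWithPlugins(config);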
package/dist/plugin-kubectl/examples/06-advanced.js
@@ -0,0 +1,140 @@
/**
 * Example 6: Advanced features
 *
 * Demonstrates the kubectl plugin's advanced APIs:
 * portForward, waitForService, getEvents, getNodes, cp
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/06-advanced.ts
 *
 * Prerequisites:
 * - kubectl is configured and connected to a Kubernetes cluster
 */
import { describe, defineConfig, createTestWithPlugins, } from '../../repterm/src/index.js';
import { kubectlPlugin } from '../src/index.js';
// Configure the plugin
const config = defineConfig({
    plugins: [kubectlPlugin({ namespace: 'default' })],
});
const test = createTestWithPlugins(config);
// Test resources
const webAppYaml = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: webapp-svc
spec:
  selector:
    app: webapp
  ports:
    - port: 80
      targetPort: 80
`;
const filePodYaml = `
apiVersion: v1
kind: Pod
metadata:
  name: file-pod
spec:
  containers:
    - name: main
      image: busybox
      command: ['sh', '-c', 'echo "test content" > /tmp/test.txt && sleep 3600']
`;
describe('Advanced API', () => {
    // ===== getNodes - fetch node information =====
    test('getNodes - list cluster nodes', async (ctx) => {
        const { kubectl } = ctx.plugins;
        const nodes = await kubectl.getNodes();
        for (const node of nodes) {
        }
    });
    test('getNodes - filter with a selector', async (ctx) => {
        const { kubectl } = ctx.plugins;
        // Try filtering by label (may return an empty list)
        const controlPlaneNodes = await kubectl.getNodes({ selector: 'node-role.kubernetes.io/control-plane' });
    });
    // Prepare test resources
    test('Setup: create test resources', async (ctx) => {
        const { kubectl } = ctx.plugins;
        await kubectl.apply(webAppYaml);
        await kubectl.apply(filePodYaml);
        await kubectl.wait('deployment', 'webapp', 'Available', { timeout: 120000 });
        await kubectl.waitForPod('file-pod', 'Running', 60000);
    });
    // ===== getEvents - fetch cluster events =====
    test('getEvents - fetch namespace events', async (ctx) => {
        const { kubectl } = ctx.plugins;
        const events = await kubectl.getEvents();
        // Show the most recent events
        const recentEvents = events.slice(0, 5);
        for (const event of recentEvents) {
        }
    });
    test('getEvents - filter with a field selector', async (ctx) => {
        const { kubectl } = ctx.plugins;
        // Filter events for a specific resource
        const webappEvents = await kubectl.getEvents({
            fieldSelector: 'involvedObject.name=webapp',
        });
    });
    // ===== waitForService - wait for a Service to become ready =====
    test('waitForService - wait until the Service has endpoints', async (ctx) => {
        const { kubectl } = ctx.plugins;
        const endpoint = await kubectl.waitForService('webapp-svc', 60000);
    });
    // ===== portForward - port forwarding =====
    test('portForward - forward a port to a Service', async (ctx) => {
        const { kubectl } = ctx.plugins;
        // Start port forwarding
        const handle = await kubectl.portForward('svc/webapp-svc', '18080:80', { delay: 2000 });
        // The connection could be tested here
        // const response = await fetch('http://localhost:18080');
        // Stop port forwarding
        await handle.stop();
    });
    // ===== cp - file copy =====
    test('cp - copy a file from a Pod to the local machine', async (ctx) => {
        const { kubectl } = ctx.plugins;
        // Copy the file to the local machine
        await kubectl.cp('file-pod:/tmp/test.txt', '/tmp/k8s-test.txt');
    });
    test('cp - copy a local file into a Pod', async (ctx) => {
        const { kubectl } = ctx.plugins;
        // Create a local file
        await ctx.terminal.run('echo "uploaded content" > /tmp/upload.txt');
        // Copy it into the Pod
        await kubectl.cp('/tmp/upload.txt', 'file-pod:/tmp/uploaded.txt');
        // Verify
        await kubectl.exec('file-pod', 'cat /tmp/uploaded.txt');
    });
    // Cleanup
    test('Cleanup: delete test resources', async (ctx) => {
        const { kubectl } = ctx.plugins;
        await kubectl.delete('deployment', 'webapp', { force: true });
        await kubectl.delete('service', 'webapp-svc', { force: true });
        await kubectl.delete('pod', 'file-pod', { force: true });
        // Clean up local temp files
        await ctx.terminal.run('rm -f /tmp/k8s-test.txt /tmp/upload.txt');
    });
});
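The portForward test above leaves the actual connection check commented out. One way to exercise it, sketched under the assumption that handle.stop() is safe to call from a finally block (it is only ever called directly in the example):

test('portForward - probe the forwarded port', async (ctx) => {
    const { kubectl } = ctx.plugins;
    const handle = await kubectl.portForward('svc/webapp-svc', '18080:80', { delay: 2000 });
    try {
        // Hit the forwarded port; nginx behind webapp-svc should answer.
        const response = await fetch('http://localhost:18080');
        // An assertion on response.ok could go here (expect is not imported in this example file).
    }
    finally {
        // Tear the forward down even if the probe throws.
        await handle.stop();
    }
});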
package/dist/plugin-kubectl/examples/tensor-fusion/00-prerequisites.d.ts
@@ -0,0 +1,14 @@
/**
 * Test scenario 0: prerequisite checks
 *
 * Verifies that the test environment satisfies the following:
 * - The Kubernetes cluster is reachable
 * - The Tensor Fusion Controller is running
 * - The GPUPool exists and is Ready
 * - At least one GPU is available with sufficient resources
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/00-prerequisites.ts
 */
export {};
//# sourceMappingURL=00-prerequisites.d.ts.map
package/dist/plugin-kubectl/examples/tensor-fusion/00-prerequisites.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"00-prerequisites.d.ts","sourceRoot":"","sources":["../../../../examples/tensor-fusion/00-prerequisites.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG"}
package/dist/plugin-kubectl/examples/tensor-fusion/00-prerequisites.js
@@ -0,0 +1,66 @@
/**
 * Test scenario 0: prerequisite checks
 *
 * Verifies that the test environment satisfies the following:
 * - The Kubernetes cluster is reachable
 * - The Tensor Fusion Controller is running
 * - The GPUPool exists and is Ready
 * - At least one GPU is available with sufficient resources
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/00-prerequisites.ts
 */
import { test, describe, expect, step, gpupool, resource, TEST_GPU_POOL, TF_SYSTEM_NAMESPACE, TF_CONTROLLER_DEPLOYMENT, getFirstGpuName, getGpuAvailable, parseTflops, } from './_config.js';
describe('Prerequisite checks', { record: true }, () => {
    // ===== Cluster connectivity check =====
    test('Verify Kubernetes cluster connectivity', async (ctx) => {
        const { kubectl } = ctx.plugins;
        await step('Check the cluster connection status', async () => {
            const clusterInfo = await kubectl.clusterInfo();
            expect(clusterInfo.reachable).toBe(true);
        });
    });
    // ===== Tensor Fusion Controller check =====
    test('Verify the Tensor Fusion Controller is running', async (ctx) => {
        const { kubectl } = ctx.plugins;
        await step('Check the Controller Deployment', async () => {
            const originalNs = kubectl.getNamespace();
            kubectl.setNamespace(TF_SYSTEM_NAMESPACE);
            try {
                const controllerDeployment = resource(kubectl, 'deployment', TF_CONTROLLER_DEPLOYMENT);
                await expect(controllerDeployment).toExistInCluster();
                await expect(controllerDeployment).toBeAvailable();
            }
            finally {
                kubectl.setNamespace(originalNs);
            }
        });
    });
    // ===== GPUPool check =====
    test('Verify the GPUPool exists and is ready', async (ctx) => {
        const { kubectl } = ctx.plugins;
        await step('Check that the GPUPool exists', async () => {
            const pool = gpupool(kubectl, TEST_GPU_POOL);
            await expect(pool).toExistInCluster();
        });
        await step('Check the GPUPool status', async () => {
            const pool = gpupool(kubectl, TEST_GPU_POOL);
            await expect(pool).toHaveStatusField('phase', 'Running');
        });
    });
    // ===== GPU resource check =====
    test('Verify sufficient GPU resources', async (ctx) => {
        const { kubectl } = ctx.plugins;
        const gpuName = await getFirstGpuName(kubectl);
        await step('Check available GPU resources', async () => {
            const available = await getGpuAvailable(kubectl, gpuName);
            const tflopsValue = parseTflops(available.tflops);
            // Verify the resources meet the test requirements (at least 2000m TFlops)
            expect(tflopsValue).toBeGreaterThanOrEqual(2000);
        });
        await step('Check the GPU status', async () => {
            const exists = await kubectl.exists('gpu', gpuName);
            expect(exists).toBe(true);
        });
    });
});
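parseTflops and getGpuAvailable come from _config.js, which is listed in the manifest but not shown in this section. Judging from the assertion above ("at least 2000m TFlops" is compared against the number 2000), the helper appears to normalise Kubernetes-style quantity strings to milli-TFlops. A hypothetical stand-in with that behaviour, for illustration only:

// Hypothetical illustration - the real parseTflops lives in
// examples/tensor-fusion/_config.ts and may differ.
function parseTflopsSketch(quantity: string): number {
    // '2000m' -> 2000 (already expressed in milli-TFlops)
    if (quantity.endsWith('m')) {
        return Number(quantity.slice(0, -1));
    }
    // '2' -> 2000 (whole TFlops converted to milli units)
    return Number(quantity) * 1000;
}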
package/dist/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.d.ts
@@ -0,0 +1,14 @@
/**
 * Test scenario 1: normal resource allocation - using a TensorFusionWorkload
 *
 * Verifies that creating a TensorFusionWorkload:
 * - Allocates GPU resources correctly
 * - Moves the Workload to the Running phase
 * - Reduces the GPU's available resources accordingly
 * - Gives the Worker Pod the expected annotations
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.ts
 */
export {};
//# sourceMappingURL=01-workload-allocation.d.ts.map
package/dist/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"01-workload-allocation.d.ts","sourceRoot":"","sources":["../../../../examples/tensor-fusion/01-workload-allocation.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;GAWG"}
package/dist/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.js
@@ -0,0 +1,145 @@
/**
 * Test scenario 1: normal resource allocation - using a TensorFusionWorkload
 *
 * Verifies that creating a TensorFusionWorkload:
 * - Allocates GPU resources correctly
 * - Moves the Workload to the Running phase
 * - Reduces the GPU's available resources accordingly
 * - Gives the Worker Pod the expected annotations
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/01-workload-allocation.ts
 */
import { sleep } from 'bun';
import { test, describe, expect, step, tensorfusionworkload, workloadYaml, DEFAULT_TIMEOUT, getFirstGpuName, getGpuAvailable, parseTflops, } from './_config.js';
const WORKLOAD_NAME = 'test-workload-alloc';
describe('Test scenario 1: normal resource allocation - TensorFusionWorkload', { record: true }, () => {
    test('Full TensorFusionWorkload resource allocation flow', async (ctx) => {
        const { kubectl } = ctx.plugins;
        let gpuName;
        let initialTflops;
        // ===== Step 1: record the initial state =====
        await step('Get the test GPU', {
            showStepTitle: false,
            typingSpeed: 60, // move quickly through the setup phase
            pauseAfter: 1000
        }, async () => {
            gpuName = await getFirstGpuName(kubectl);
        });
        await step('Record the initial available resources', {
            typingSpeed: 60,
            pauseAfter: 1500 // give viewers time to see the initial state
        }, async () => {
            const available = await getGpuAvailable(kubectl, gpuName);
            initialTflops = available.tflops;
        });
        // ===== Step 2: create the resource (the core operation) =====
        await step('Create the Workload', {
            showStepTitle: false,
            typingSpeed: 100, // type the core operation slowly
            pauseAfter: 3000 // important output, linger on it
        }, async () => {
            const yaml = workloadYaml(WORKLOAD_NAME, {
                tflopsRequest: '1000m',
                tflopsLimit: '2000m',
                vramRequest: '1Gi',
                vramLimit: '2Gi',
            });
            const result = await kubectl.apply(yaml);
            await expect(result).toBeSuccessful();
        });
        // ===== Step 3: wait for the Workload to become ready =====
        await step('Wait for the Ready condition', {
            showStepTitle: false,
            pauseAfter: 2000
        }, async () => {
            const result = await kubectl.wait('tensorfusionworkload', WORKLOAD_NAME, 'Ready', { timeout: DEFAULT_TIMEOUT });
            await expect(result).toBeSuccessful();
        });
        await step('Verify the Workload phase is Running', {
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const workload = tensorfusionworkload(kubectl, WORKLOAD_NAME);
            await expect(workload).toHaveStatusField('phase', 'Running');
        });
        // ===== Step 4: verify the allocation result =====
        await step('Check the change in available GPU resources', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2500 // the verification output needs reading time
        }, async () => {
            sleep(1000);
            const afterAvailable = await getGpuAvailable(kubectl, gpuName);
            const initialTflopsNum = parseTflops(initialTflops);
            const afterTflopsNum = parseTflops(afterAvailable.tflops);
            // TFlops should have decreased
            expect(afterTflopsNum).toBeLessThan(initialTflopsNum);
        });
        await step('Verify the Workload readyWorkers', {
            pauseAfter: 1500
        }, async () => {
            const status = await kubectl.getJsonPath('tensorfusionworkload', WORKLOAD_NAME, '.status');
            expect(status?.phase).toBe('Running');
            expect(status?.readyWorkers).toBe(1);
        });
        // ===== Step 5: verify the Worker Pod =====
        await step('Find and verify the Worker Pod', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const pods = await kubectl.get('pod', undefined, {
                selector: `tensor-fusion.ai/workload=${WORKLOAD_NAME}`,
                jqFilter: '[.items[] | {name: .metadata.name, phase: .status.phase, annotations: .metadata.annotations}]'
            });
            expect(pods?.length).toBeGreaterThan(0);
            const workerPod = pods[0];
            expect(workerPod.phase).toBe('Running');
            // Verify the annotations exist
            const annotations = workerPod.annotations ?? {};
            expect(annotations['tensor-fusion.ai/tflops-request']).toBeDefined();
            expect(annotations['tensor-fusion.ai/vram-request']).toBeDefined();
        });
        // ===== Step 6: inspect the TensorFusionConnection (when a remote vGPU is used) =====
        await step('Query the associated Connection', {
            pauseAfter: 1500
        }, async () => {
            try {
                const connections = await kubectl.get('tensorfusionconnection', undefined, {
                    selector: `tensor-fusion.ai/workload=${WORKLOAD_NAME}`,
                    jqFilter: '[.items[] | {name: .metadata.name, phase: .status.phase}]'
                });
                // The Connection may not exist (local GPU mode); that is not an error
                if (connections && connections.length > 0) {
                    for (const conn of connections) {
                        expect(conn.phase).toBeDefined();
                    }
                }
            }
            catch {
                // The TensorFusionConnection CRD may not exist
            }
        });
        // ===== Cleanup =====
        await step('Delete the TensorFusionWorkload', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const result = await kubectl.delete('tensorfusionworkload', WORKLOAD_NAME);
            await expect(result).toBeSuccessful();
        });
        await step('Wait for the resources to be released and verify', {
            pauseAfter: 2000
        }, async () => {
            await new Promise(resolve => setTimeout(resolve, 5000));
            // Verify the resources were released (TFlops should recover)
            const afterRelease = await getGpuAvailable(kubectl, gpuName);
            const releasedTflops = parseTflops(afterRelease.tflops);
            const initialTflopsNum = parseTflops(initialTflops);
            // Allow a small margin of error, but the value should be close to the initial one
            expect(releasedTflops).toBeGreaterThanOrEqual(initialTflopsNum - 100);
        });
    });
});
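The allocation and release checks in this file rely on fixed waits (an unawaited sleep(1000), a 5000 ms setTimeout) before re-reading GPU capacity. A polling helper is one possible alternative, sketched here using only functions already imported by the example; the helper name and timings are illustrative, not part of _config.js:

// Poll getGpuAvailable until the reported TFlops satisfies a predicate,
// or give up after timeoutMs. Illustration only.
async function waitForTflops(kubectl, gpuName, predicate, timeoutMs = 30000) {
    const deadline = Date.now() + timeoutMs;
    while (Date.now() < deadline) {
        const available = await getGpuAvailable(kubectl, gpuName);
        const tflops = parseTflops(available.tflops);
        if (predicate(tflops)) {
            return tflops;
        }
        await new Promise(resolve => setTimeout(resolve, 1000));
    }
    throw new Error(`GPU ${gpuName} did not reach the expected TFlops within ${timeoutMs}ms`);
}
// Usage in the release step, instead of the fixed 5000 ms wait:
// await waitForTflops(kubectl, gpuName, t => t >= parseTflops(initialTflops) - 100);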
package/dist/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.d.ts
@@ -0,0 +1,13 @@
/**
 * Test scenario 2: normal resource allocation - using Pod annotations
 *
 * Verifies that adding Tensor Fusion annotations to a Deployment's Pod template:
 * - Makes the webhook create a TensorFusionWorkload automatically
 * - Allocates GPU resources correctly
 * - Lets the Pod schedule and run successfully
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.ts
 */
export {};
//# sourceMappingURL=02-annotation-mode.d.ts.map
package/dist/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"02-annotation-mode.d.ts","sourceRoot":"","sources":["../../../../examples/tensor-fusion/02-annotation-mode.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG"}
package/dist/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.js
@@ -0,0 +1,123 @@
/**
 * Test scenario 2: normal resource allocation - using Pod annotations
 *
 * Verifies that adding Tensor Fusion annotations to a Deployment's Pod template:
 * - Makes the webhook create a TensorFusionWorkload automatically
 * - Allocates GPU resources correctly
 * - Lets the Pod schedule and run successfully
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/02-annotation-mode.ts
 */
import { sleep } from 'bun';
import { test, describe, expect, step, deployment, annotatedDeploymentYaml, DEFAULT_TIMEOUT, getFirstGpuName, getGpuAvailable, parseTflops, } from './_config.js';
const DEPLOYMENT_NAME = 'test-workload-annotation';
describe('Test scenario 2: normal resource allocation - Pod annotations', { record: true }, () => {
    test('Full Pod-annotation resource allocation flow', async (ctx) => {
        const { kubectl } = ctx.plugins;
        let gpuName;
        let initialTflops;
        // ===== Step 1: prepare the environment =====
        await step('Get the test GPU', {
            showStepTitle: false,
            typingSpeed: 60,
            pauseAfter: 1000
        }, async () => {
            gpuName = await getFirstGpuName(kubectl);
        });
        await step('Record the initial available resources', {
            typingSpeed: 60,
            pauseAfter: 1500
        }, async () => {
            const available = await getGpuAvailable(kubectl, gpuName);
            initialTflops = available.tflops;
        });
        // ===== Step 2: create the annotated Deployment (the core operation) =====
        await step('Create the annotated Deployment', {
            showStepTitle: false,
            typingSpeed: 100,
            pauseAfter: 3000
        }, async () => {
            const yaml = annotatedDeploymentYaml(DEPLOYMENT_NAME, {
                tflopsRequest: '1000m',
                tflopsLimit: '2000m',
                vramRequest: '1Gi',
                vramLimit: '2Gi',
            });
            const result = await kubectl.apply(yaml);
            await expect(result).toBeSuccessful();
        });
        await step('Verify the Deployment carries the Tensor Fusion annotations', {
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            await sleep(2000);
            const annotations = await kubectl.get('deployment', DEPLOYMENT_NAME, {
                jqFilter: '.spec.template.metadata.annotations | with_entries(select(.key | startswith("tensor-fusion.ai")))'
            });
            expect(annotations['tensor-fusion.ai/gpu-pool']).toBeDefined();
            expect(annotations['tensor-fusion.ai/tflops-request']).toBeDefined();
        });
        // ===== Step 3: verify the Deployment and Pod status =====
        await step('Check the Deployment availability', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const deploy = deployment(kubectl, DEPLOYMENT_NAME);
            await expect(deploy).toExistInCluster();
            await kubectl.wait('deployment', DEPLOYMENT_NAME, 'Available', {
                timeout: DEFAULT_TIMEOUT,
            });
        });
        await step('Verify the Pod is running', {
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const pods = await kubectl.get('pod', undefined, {
                selector: `app=${DEPLOYMENT_NAME}`,
                jqFilter: '[.items[] | {name: .metadata.name, phase: .status.phase}]'
            });
            expect(pods?.length).toBeGreaterThan(0);
            expect(pods[0].phase).toBe('Running');
        });
        // ===== Step 4: verify the GPU resource allocation =====
        await step('Check the change in available GPU resources', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2500
        }, async () => {
            await sleep(1000);
            const available = await getGpuAvailable(kubectl, gpuName);
            const currentTflops = parseTflops(available.tflops);
            const initialTflopsNum = parseTflops(initialTflops);
            // GPU resources should have decreased
            expect(currentTflops).toBeLessThan(initialTflopsNum);
        });
        // ===== Cleanup =====
        await step('Delete the Deployment', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const result = await kubectl.delete('deployment', DEPLOYMENT_NAME, { force: true });
            await expect(result).toBeSuccessful();
        });
        await step('Wait for the resources to be released and verify', {
            pauseAfter: 2000
        }, async () => {
            await sleep(5000);
            // Verify the TensorFusionWorkload is cleaned up automatically
            const workloadExists = await kubectl.exists('tensorfusionworkload', DEPLOYMENT_NAME);
            if (workloadExists) {
                await kubectl.delete('tensorfusionworkload', DEPLOYMENT_NAME, { force: true });
            }
            // Verify the GPU resources were released (TFlops should recover)
            const afterRelease = await getGpuAvailable(kubectl, gpuName);
            const releasedTflops = parseTflops(afterRelease.tflops);
            const initialTflopsNum = parseTflops(initialTflops);
            // Allow a small margin of error, but the value should be close to the initial one
            expect(releasedTflops).toBeGreaterThanOrEqual(initialTflopsNum - 100);
        });
    });
});
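annotatedDeploymentYaml is defined in _config.js, which is not shown in this section. Going only by the annotation keys the tests above assert on (tensor-fusion.ai/gpu-pool, tensor-fusion.ai/tflops-request, tensor-fusion.ai/vram-request) and the request values passed in, the generated Pod template presumably carries annotations along these lines; the exact key set and the pool value are assumptions:

// Sketch of the Pod-template annotations the webhook reacts to. Values match
// the test's request ('1000m' TFlops, '1Gi' VRAM); the pool name is a placeholder.
const podTemplateAnnotations = {
    'tensor-fusion.ai/gpu-pool': '<TEST_GPU_POOL>',
    'tensor-fusion.ai/tflops-request': '1000m',
    'tensor-fusion.ai/vram-request': '1Gi',
};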
package/dist/plugin-kubectl/examples/tensor-fusion/03-insufficient.d.ts
@@ -0,0 +1,17 @@
/**
 * Test scenario 3: insufficient resources
 *
 * Verifies that when the requested GPU resources exceed what is available:
 * - The TensorFusionWorkload should stay Pending
 * - replicas should remain 0
 * - The available GPU resources should not decrease
 *
 * Recording behaviour:
 * - Main pane: continuously watch the workload status changes
 * - New pane: run the query and verification commands
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/03-insufficient.ts
 */
export {};
//# sourceMappingURL=03-insufficient.d.ts.map
package/dist/plugin-kubectl/examples/tensor-fusion/03-insufficient.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"03-insufficient.d.ts","sourceRoot":"","sources":["../../../../examples/tensor-fusion/03-insufficient.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG"}
package/dist/plugin-kubectl/examples/tensor-fusion/03-insufficient.js
@@ -0,0 +1,96 @@
/**
 * Test scenario 3: insufficient resources
 *
 * Verifies that when the requested GPU resources exceed what is available:
 * - The TensorFusionWorkload should stay Pending
 * - replicas should remain 0
 * - The available GPU resources should not decrease
 *
 * Recording behaviour:
 * - Main pane: continuously watch the workload status changes
 * - New pane: run the query and verification commands
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/03-insufficient.ts
 */
import { sleep } from 'bun';
import { test, describe, expect, step, workloadYaml, getFirstGpuName, getGpuAvailable, } from './_config.js';
const WORKLOAD_NAME = 'test-workload-insufficient';
describe('Test scenario 3: insufficient resources', { record: true }, () => {
    test('Workload behaviour when GPU resources are insufficient', async (ctx) => {
        const { kubectl } = ctx.plugins;
        const { terminal } = ctx;
        let gpuName;
        let initialTflops;
        let initialVram;
        // ===== Step 1: record the initial state =====
        await step('Get the test GPU', {
            showStepTitle: false,
            typingSpeed: 60,
            pauseAfter: 1000
        }, async () => {
            gpuName = await getFirstGpuName(kubectl);
        });
        await step('Record the initial available resources', {
            typingSpeed: 60,
            pauseAfter: 1500
        }, async () => {
            const available = await getGpuAvailable(kubectl, gpuName);
            initialTflops = available.tflops;
            initialVram = available.vram;
        });
        // ===== Step 2: create a Workload that over-requests resources (the core operation) =====
        await step('Create a Workload that over-requests resources', {
            showStepTitle: false,
            typingSpeed: 100,
            pauseAfter: 2000
        }, async () => {
            // Request 100 TFlops and 100Gi VRAM - far beyond any single GPU's capacity
            const yaml = workloadYaml(WORKLOAD_NAME, {
                tflopsRequest: '100000m', // 100 TFlops
                tflopsLimit: '100000m',
                vramRequest: '100Gi',
                vramLimit: '100Gi',
            });
            const result = await kubectl.apply(yaml);
            await expect(result).toBeSuccessful();
        });
        // ===== Step 3: watch in the main pane, verify in a new pane =====
        await step('Observe the Workload status and verify', {
            showStepTitle: false,
            pauseAfter: 2000
        }, async () => {
            // Start a watch in the main pane (returns once the command has been entered)
            const watchProc = await kubectl.get('tensorfusionworkload', WORKLOAD_NAME, { watch: true });
            // Create a new terminal (plugins are carried over automatically)
            const terminal2 = await terminal.create();
            const kubectl2 = terminal2.plugins.kubectl;
            // Run the verification commands in the new pane
            // Check that the phase is not Running
            const status = await kubectl2.getJsonPath('tensorfusionworkload', WORKLOAD_NAME, '.status');
            expect(status?.phase).not.toBe('Running');
            // Check that no GPU resources were allocated
            const currentAvailable = await getGpuAvailable(kubectl2, gpuName);
            expect(currentAvailable.tflops).toBe(initialTflops);
            expect(currentAvailable.vram).toBe(initialVram);
            // Fetch the event information
            await kubectl2.get('event', undefined, {
                fieldSelector: `involvedObject.name=${WORKLOAD_NAME},involvedObject.kind=TensorFusionWorkload`,
                jqFilter: '[.items[] | {reason: .reason, message: .message, type: .type}]'
            });
            // Observe for a while, then shut down
            await sleep(3000);
            // Interrupt the watch
            await watchProc.interrupt();
        });
        // ===== Step 4: cleanup =====
        await step('Delete the TensorFusionWorkload', {
            showStepTitle: false,
            typingSpeed: 80,
            pauseAfter: 2000
        }, async () => {
            const result = await kubectl.delete('tensorfusionworkload', WORKLOAD_NAME);
            await expect(result).toBeSuccessful();
        });
    });
});
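The watch started in the main pane above is only interrupted at the end of the step, so a failing assertion in between would leave it running. A defensive variant of the same step, assuming watchProc.interrupt() behaves as used above:

const watchProc = await kubectl.get('tensorfusionworkload', WORKLOAD_NAME, { watch: true });
try {
    const terminal2 = await terminal.create();
    const kubectl2 = terminal2.plugins.kubectl;
    const status = await kubectl2.getJsonPath('tensorfusionworkload', WORKLOAD_NAME, '.status');
    expect(status?.phase).not.toBe('Running');
}
finally {
    // Stop the watch even when an assertion above fails.
    await watchProc.interrupt();
}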
package/dist/plugin-kubectl/examples/tensor-fusion/04-release.d.ts
@@ -0,0 +1,13 @@
/**
 * Test scenario 4: resource release verification
 *
 * Verifies that after a TensorFusionWorkload is deleted:
 * - GPU resources are released correctly
 * - The available resources return to their initial values
 * - The associated Worker Pod is cleaned up
 *
 * How to run:
 *   bun run repterm packages/plugin-kubectl/examples/tensor-fusion/04-release.ts
 */
export {};
//# sourceMappingURL=04-release.d.ts.map
package/dist/plugin-kubectl/examples/tensor-fusion/04-release.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"04-release.d.ts","sourceRoot":"","sources":["../../../../examples/tensor-fusion/04-release.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG"}
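04-release.js itself falls outside this portion of the diff. Going only by the declaration file's doc comment and the release checks already shown in 01-workload-allocation.js, its core assertion presumably resembles the following sketch (not the file's actual contents):

// Record capacity, create and delete a workload, then confirm the GPU's
// available TFlops returns to (roughly) its starting value.
const before = parseTflops((await getGpuAvailable(kubectl, gpuName)).tflops);
await kubectl.apply(workloadYaml(WORKLOAD_NAME, { tflopsRequest: '1000m', tflopsLimit: '2000m', vramRequest: '1Gi', vramLimit: '2Gi' }));
await kubectl.wait('tensorfusionworkload', WORKLOAD_NAME, 'Ready', { timeout: DEFAULT_TIMEOUT });
await kubectl.delete('tensorfusionworkload', WORKLOAD_NAME);
const after = parseTflops((await getGpuAvailable(kubectl, gpuName)).tflops);
expect(after).toBeGreaterThanOrEqual(before - 100);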