tuya-platform-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +218 -0
- package/examples/rag-workflow.json +63 -0
- package/examples/simple-llm.json +37 -0
- package/package.json +40 -0
- package/src/cli.js +78 -0
- package/src/lib/analyze.js +213 -0
- package/src/lib/cdp-client.js +115 -0
- package/src/lib/chrome.js +115 -0
- package/src/lib/commands/auto-basic.js +482 -0
- package/src/lib/commands/configure.js +158 -0
- package/src/lib/commands/doctor.js +184 -0
- package/src/lib/commands/list-libraries.js +33 -0
- package/src/lib/commands/manual-record.js +82 -0
- package/src/lib/commands/publish.js +63 -0
- package/src/lib/commands/sample-branch-edges.js +391 -0
- package/src/lib/commands/sample-extra-nodes.js +204 -0
- package/src/lib/commands/sample-trial-inputs.js +173 -0
- package/src/lib/commands/shared.js +457 -0
- package/src/lib/config.js +204 -0
- package/src/lib/recorder.js +316 -0
- package/src/lib/report.js +309 -0
- package/src/lib/schema-builder.js +431 -0
- package/src/lib/selectors.js +12 -0
- package/src/lib/steps.js +50 -0
- package/src/lib/util.js +93 -0
package/README.md
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
# tuya-platform-cli
|
|
2
|
+
|
|
3
|
+
CLI tool for creating, managing, and publishing AI workflows on the Tuya platform via Chrome CDP.
|
|
4
|
+
|
|
5
|
+
## 安装
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
# 全局安装
|
|
9
|
+
npm install -g tuya-platform-cli
|
|
10
|
+
|
|
11
|
+
# 或使用 npx 直接运行
|
|
12
|
+
npx tuya-platform-cli --help
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
## 运行前提
|
|
16
|
+
|
|
17
|
+
1. 本机 macOS 已安装 Google Chrome。
|
|
18
|
+
2. Chrome 以 `--remote-debugging-port=9440` 启动。
|
|
19
|
+
3. 已登录 `platform.tuya.com`(或 `us.platform.tuya.com`)。
|
|
20
|
+
|
|
21
|
+
## CLI 命令
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
tuya-platform-cli --help
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
也可以通过 `package.json` 脚本运行:
|
|
28
|
+
|
|
29
|
+
```bash
|
|
30
|
+
npm run tuya -- --help
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
### 1. 手工录制 8 步
|
|
34
|
+
|
|
35
|
+
默认命令,行为与之前一致:启动隔离 Chrome,并按步骤提示你手工完成操作。
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
npm run tuya -- \
|
|
39
|
+
--output-dir artifacts/tuya-platform-cli/manual-run \
|
|
40
|
+
--base-url https://platform.tuya.com/ai/workflow
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
等价命令:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
node src/cli.js manual --output-dir artifacts/tuya-platform-cli/manual-run
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
运行后脚本会:
|
|
50
|
+
|
|
51
|
+
1. 复制一份 Chrome `Default` profile 到临时目录,避免锁定现有 Chrome。
|
|
52
|
+
2. 启动带 `--remote-debugging-port` 的专用 Chrome。
|
|
53
|
+
3. 打开平台页面。
|
|
54
|
+
4. 逐步提示你在浏览器中手动完成 8 个操作;每一步结束后回终端按 Enter。
|
|
55
|
+
5. 自动导出:
|
|
56
|
+
- `report.md`
|
|
57
|
+
- `static-analysis.md`
|
|
58
|
+
- 每一步的 `network.json`、`requests/*.json`、`screenshot.png`
|
|
59
|
+
- `bundles/` 目录中的脚本响应体
|
|
60
|
+
|
|
61
|
+
### 0. 执行前自检
|
|
62
|
+
|
|
63
|
+
建议在自动命令前先跑一次 `doctor`,确认:
|
|
64
|
+
|
|
65
|
+
- CDP 端口可连接
|
|
66
|
+
- 当前页面仍是 workflow 页面
|
|
67
|
+
- 登录态有效
|
|
68
|
+
- `workflow/list` 可调用
|
|
69
|
+
- 可选的 `workflowId` 能通过 `workflow/get` / `run-pre-check` / `test/config`
|
|
70
|
+
- 可选的图片路径存在
|
|
71
|
+
|
|
72
|
+
```bash
|
|
73
|
+
node src/cli.js doctor \
|
|
74
|
+
--remote-debugging-port 9440 \
|
|
75
|
+
--workflow-id 5883 \
|
|
76
|
+
--region AZ \
|
|
77
|
+
--image-path artifacts/live-run/add-node-inspect.png \
|
|
78
|
+
--output-dir artifacts/live-run/doctor
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
或者:
|
|
82
|
+
|
|
83
|
+
```bash
|
|
84
|
+
npm run tuya:doctor -- --remote-debugging-port 9440 --workflow-id 5883 --region AZ
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
输出包括:
|
|
88
|
+
|
|
89
|
+
- `doctor.json`
|
|
90
|
+
- `doctor.md`
|
|
91
|
+
|
|
92
|
+
### 2. 自动跑基础 8 步
|
|
93
|
+
|
|
94
|
+
这个命令不会启动新 Chrome,而是直接连接到已开启 CDP 的 Chrome。
|
|
95
|
+
适合你已经手动登录好平台,并希望自动执行“列表 -> 创建 -> 编辑 -> 加节点 -> 保存 -> 试运行 -> 删除”。
|
|
96
|
+
|
|
97
|
+
```bash
|
|
98
|
+
node src/cli.js auto-basic \
|
|
99
|
+
--remote-debugging-port 9440 \
|
|
100
|
+
--output-dir artifacts/live-run/auto-basic
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
或者:
|
|
104
|
+
|
|
105
|
+
```bash
|
|
106
|
+
npm run tuya:auto -- --remote-debugging-port 9440
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
前提:
|
|
110
|
+
|
|
111
|
+
- 目标 Chrome 已用 `--remote-debugging-port=9440` 启动
|
|
112
|
+
- 当前账号已登录 `platform.tuya.com`
|
|
113
|
+
|
|
114
|
+
### 3. 补采扩展节点样本
|
|
115
|
+
|
|
116
|
+
当前内置 `Loop` 和 `Monitor(定时监听)` 两种额外节点的自动采样。
|
|
117
|
+
命令会连接当前 Chrome,会话内直接创建样本 workflow,并导出节点 schema 样本。
|
|
118
|
+
|
|
119
|
+
```bash
|
|
120
|
+
node src/cli.js sample-extra-nodes \
|
|
121
|
+
--remote-debugging-port 9440 \
|
|
122
|
+
--region AZ \
|
|
123
|
+
--output-dir artifacts/live-run/extra-nodes
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
或者:
|
|
127
|
+
|
|
128
|
+
```bash
|
|
129
|
+
npm run tuya:samples -- --remote-debugging-port 9440 --region AZ
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
输出包括:
|
|
133
|
+
|
|
134
|
+
- `extra-node-samples-summary.json`
|
|
135
|
+
- `extra-node-samples-extended.json`
|
|
136
|
+
- `extra-node-samples.md`
|
|
137
|
+
- `loop-node-sample-<workflowId>.json`
|
|
138
|
+
- `monitor-node-sample-<workflowId>.json`
|
|
139
|
+
|
|
140
|
+
### 4. 补采多分支 edges 样本
|
|
141
|
+
|
|
142
|
+
这个命令会自动创建一个“开始 -> 选择器 -> 双分支大模型 -> 双结束节点”的工作流,并直接落盘保存 schema。
|
|
143
|
+
|
|
144
|
+
```bash
|
|
145
|
+
node src/cli.js sample-branch-edges \
|
|
146
|
+
--remote-debugging-port 9440 \
|
|
147
|
+
--region AZ \
|
|
148
|
+
--output-dir artifacts/live-run/branch-edges
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
或者:
|
|
152
|
+
|
|
153
|
+
```bash
|
|
154
|
+
npm run tuya:branches -- --remote-debugging-port 9440 --region AZ
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
输出包括:
|
|
158
|
+
|
|
159
|
+
- `branch-edges-<workflowId>.json`
|
|
160
|
+
- `branch-edges.md`
|
|
161
|
+
|
|
162
|
+
### 5. 补采 trial-run 输入样本
|
|
163
|
+
|
|
164
|
+
这个命令要求提供一个已经可试运行的 `workflowId`。命令会自动执行:
|
|
165
|
+
|
|
166
|
+
- `run-pre-check`
|
|
167
|
+
- `test/config`
|
|
168
|
+
- `test/config/save`
|
|
169
|
+
- 文本输入执行
|
|
170
|
+
- 可选的图文输入执行
|
|
171
|
+
|
|
172
|
+
文本 + 图片样本需要显式传入图片路径:
|
|
173
|
+
|
|
174
|
+
```bash
|
|
175
|
+
node src/cli.js sample-trial-inputs \
|
|
176
|
+
--remote-debugging-port 9440 \
|
|
177
|
+
--workflow-id 5883 \
|
|
178
|
+
--region AZ \
|
|
179
|
+
--image-path artifacts/live-run/add-node-inspect.png \
|
|
180
|
+
--output-dir artifacts/live-run/trial-inputs
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
或者:
|
|
184
|
+
|
|
185
|
+
```bash
|
|
186
|
+
npm run tuya:trial-inputs -- --remote-debugging-port 9440 --workflow-id 5883 --region AZ
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
## 通用参数
|
|
190
|
+
|
|
191
|
+
```text
|
|
192
|
+
--base-url
|
|
193
|
+
--chrome-path
|
|
194
|
+
--chrome-profile-root
|
|
195
|
+
--chrome-profile-name
|
|
196
|
+
--output-dir
|
|
197
|
+
--remote-debugging-port
|
|
198
|
+
--workflow-name
|
|
199
|
+
--workflow-id
|
|
200
|
+
--region
|
|
201
|
+
--image-path
|
|
202
|
+
--headless
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
## 推荐流程
|
|
206
|
+
|
|
207
|
+
1. 在专用 Chrome 窗口中先确认登录态仍然有效。
|
|
208
|
+
2. 严格按终端提示逐步执行,避免额外点击造成噪音请求。
|
|
209
|
+
3. 如果某一步明显失败,不要继续误操作;先结束该步,让报告记录失败痕迹。
|
|
210
|
+
4. 试运行步骤结束后等待流式响应完全停止,再回终端确认。
|
|
211
|
+
|
|
212
|
+
## 局限
|
|
213
|
+
|
|
214
|
+
- 画布基于 Canvas/WebGL,脚本不会尝试解析节点视觉结构。
|
|
215
|
+
- 若目标请求完全隐藏在系统层无法被 CDP 获取,可在报告基础上结合代理抓包继续补充。
|
|
216
|
+
- 报告中的 schema 摘要来自实测保存 payload 和 bundle 静态分析,不是官方协议文档。
|
|
217
|
+
- `auto-basic` 和 `sample-extra-nodes` 强依赖当前登录态,以及页面文案/布局没有大改。
|
|
218
|
+
- `sample-trial-inputs` 依赖当前账号下已有可运行 workflow,且试运行授权已经完成。
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "RAG知识库问答工作流",
|
|
3
|
+
"description": "用户输入 → 知识库检索 → 大模型生成回答 → 输出",
|
|
4
|
+
"region": "AZ",
|
|
5
|
+
"startOutputs": [
|
|
6
|
+
{ "id": "USER_TEXT", "type": "string", "required": true }
|
|
7
|
+
],
|
|
8
|
+
"nodes": [
|
|
9
|
+
{
|
|
10
|
+
"id": "sk_1",
|
|
11
|
+
"type": "SearchKnowledge",
|
|
12
|
+
"title": "知识库检索",
|
|
13
|
+
"inputs": [
|
|
14
|
+
{ "id": "q1", "ref": "start.USER_TEXT", "name": "query" }
|
|
15
|
+
],
|
|
16
|
+
"params": {
|
|
17
|
+
"libResource": [
|
|
18
|
+
{
|
|
19
|
+
"canUse": true,
|
|
20
|
+
"libCategories": [],
|
|
21
|
+
"libCode": "YOUR_LIB_CODE",
|
|
22
|
+
"libName": "你的知识库名称",
|
|
23
|
+
"supportCategory": true
|
|
24
|
+
}
|
|
25
|
+
],
|
|
26
|
+
"maxRecallNum": 5,
|
|
27
|
+
"searchStrategy": 1,
|
|
28
|
+
"similarity": 0.7
|
|
29
|
+
},
|
|
30
|
+
"outputs": [
|
|
31
|
+
{ "id": "d1", "name": "data", "type": "string" }
|
|
32
|
+
]
|
|
33
|
+
},
|
|
34
|
+
{
|
|
35
|
+
"id": "llm_1",
|
|
36
|
+
"type": "LLM",
|
|
37
|
+
"title": "RAG回答生成",
|
|
38
|
+
"inputs": [
|
|
39
|
+
{ "id": "in1", "ref": "sk_1.d1", "name": "input" }
|
|
40
|
+
],
|
|
41
|
+
"params": {
|
|
42
|
+
"model": "160",
|
|
43
|
+
"systemPrompt": "你是一个RAG问答助手。优先依据知识库检索结果回答,不要编造;如果检索信息不足,直接说明信息不足。",
|
|
44
|
+
"userPrompt": "请结合检索到的知识内容,对用户问题给出简洁准确的回答。"
|
|
45
|
+
},
|
|
46
|
+
"outputs": [
|
|
47
|
+
{ "id": "out1", "name": "output", "type": "string" }
|
|
48
|
+
]
|
|
49
|
+
}
|
|
50
|
+
],
|
|
51
|
+
"ends": [
|
|
52
|
+
{
|
|
53
|
+
"id": "end_1",
|
|
54
|
+
"title": "输出",
|
|
55
|
+
"input": { "id": "in_end", "ref": "llm_1.out1" }
|
|
56
|
+
}
|
|
57
|
+
],
|
|
58
|
+
"edges": [
|
|
59
|
+
["start", "sk_1"],
|
|
60
|
+
["sk_1", "llm_1"],
|
|
61
|
+
["llm_1", "end_1"]
|
|
62
|
+
]
|
|
63
|
+
}
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "简单LLM工作流",
|
|
3
|
+
"description": "最小示例:用户输入 → 大模型回答 → 输出",
|
|
4
|
+
"region": "AZ",
|
|
5
|
+
"startOutputs": [
|
|
6
|
+
{ "id": "USER_TEXT", "type": "string", "required": true }
|
|
7
|
+
],
|
|
8
|
+
"nodes": [
|
|
9
|
+
{
|
|
10
|
+
"id": "llm_1",
|
|
11
|
+
"type": "LLM",
|
|
12
|
+
"title": "大模型",
|
|
13
|
+
"inputs": [
|
|
14
|
+
{ "id": "in1", "ref": "start.USER_TEXT", "name": "input" }
|
|
15
|
+
],
|
|
16
|
+
"params": {
|
|
17
|
+
"model": "160",
|
|
18
|
+
"systemPrompt": "你是一个有帮助的助手。",
|
|
19
|
+
"userPrompt": "请回答用户的问题。"
|
|
20
|
+
},
|
|
21
|
+
"outputs": [
|
|
22
|
+
{ "id": "out1", "name": "output", "type": "string" }
|
|
23
|
+
]
|
|
24
|
+
}
|
|
25
|
+
],
|
|
26
|
+
"ends": [
|
|
27
|
+
{
|
|
28
|
+
"id": "end_1",
|
|
29
|
+
"title": "输出",
|
|
30
|
+
"input": { "id": "in_end", "ref": "llm_1.out1" }
|
|
31
|
+
}
|
|
32
|
+
],
|
|
33
|
+
"edges": [
|
|
34
|
+
["start", "llm_1"],
|
|
35
|
+
["llm_1", "end_1"]
|
|
36
|
+
]
|
|
37
|
+
}
|
package/package.json
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "tuya-platform-cli",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "CLI tool for creating, managing, and publishing AI workflows on the Tuya platform via Chrome CDP.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"tuya-platform-cli": "./src/cli.js"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"src/",
|
|
11
|
+
"examples/",
|
|
12
|
+
"README.md"
|
|
13
|
+
],
|
|
14
|
+
"engines": {
|
|
15
|
+
"node": ">=18.0.0"
|
|
16
|
+
},
|
|
17
|
+
"keywords": [
|
|
18
|
+
"tuya",
|
|
19
|
+
"workflow",
|
|
20
|
+
"ai",
|
|
21
|
+
"cdp",
|
|
22
|
+
"cli"
|
|
23
|
+
],
|
|
24
|
+
"scripts": {
|
|
25
|
+
"tuya:doctor": "node src/cli.js doctor",
|
|
26
|
+
"tuya": "node src/cli.js manual",
|
|
27
|
+
"tuya:auto": "node src/cli.js auto-basic",
|
|
28
|
+
"tuya:samples": "node src/cli.js sample-extra-nodes",
|
|
29
|
+
"tuya:branches": "node src/cli.js sample-branch-edges",
|
|
30
|
+
"tuya:trial-inputs": "node src/cli.js sample-trial-inputs",
|
|
31
|
+
"tuya:configure": "node src/cli.js configure",
|
|
32
|
+
"tuya:publish": "node src/cli.js publish",
|
|
33
|
+
"tuya:list-libraries": "node src/cli.js list-libraries",
|
|
34
|
+
"test": "node --test"
|
|
35
|
+
},
|
|
36
|
+
"dependencies": {
|
|
37
|
+
"playwright": "^1.58.2",
|
|
38
|
+
"ws": "^8.18.3"
|
|
39
|
+
}
|
|
40
|
+
}
|
package/src/cli.js
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { buildHelpText, parseCliArgs } from "./lib/config.js";
|
|
3
|
+
import { runManualRecord } from "./lib/commands/manual-record.js";
|
|
4
|
+
import { runAutoBasicCapture } from "./lib/commands/auto-basic.js";
|
|
5
|
+
import { runSampleExtraNodes } from "./lib/commands/sample-extra-nodes.js";
|
|
6
|
+
import { runSampleBranchEdges } from "./lib/commands/sample-branch-edges.js";
|
|
7
|
+
import { runSampleTrialInputs } from "./lib/commands/sample-trial-inputs.js";
|
|
8
|
+
import { runDoctor } from "./lib/commands/doctor.js";
|
|
9
|
+
import { runConfigure } from "./lib/commands/configure.js";
|
|
10
|
+
import { runPublish } from "./lib/commands/publish.js";
|
|
11
|
+
import { runListLibraries } from "./lib/commands/list-libraries.js";
|
|
12
|
+
|
|
13
|
+
// Dispatch table mapping each CLI sub-command name to its async handler.
// Every handler receives the config object produced by parseCliArgs.
const COMMANDS = {
  doctor: runDoctor,
  manual: runManualRecord,
  "auto-basic": runAutoBasicCapture,
  "sample-extra-nodes": runSampleExtraNodes,
  "sample-branch-edges": runSampleBranchEdges,
  "sample-trial-inputs": runSampleTrialInputs,
  configure: runConfigure,
  publish: runPublish,
  "list-libraries": runListLibraries,
};
|
|
24
|
+
|
|
25
|
+
// Pending shutdown tasks, executed in registration order.
const cleanupCallbacks = [];

/**
 * Register a callback to run before the process exits.
 * Callbacks may be sync or async; failures are swallowed (best effort).
 * @param {() => (void | Promise<void>)} fn - Cleanup task to queue.
 */
export function registerCleanup(fn) {
  cleanupCallbacks.push(fn);
}

/**
 * Drain the cleanup queue, awaiting each task in turn and ignoring any
 * errors it throws. The queue is empty afterwards, so calling this again
 * is a no-op unless new callbacks were registered in the meantime.
 */
async function runCleanup() {
  while (cleanupCallbacks.length > 0) {
    const task = cleanupCallbacks.shift();
    try {
      await task();
    } catch {
      // Best-effort: a failing cleanup task must not block the rest.
    }
  }
}
|
|
41
|
+
|
|
42
|
+
// Set once the first shutdown signal arrives so cleanup never runs twice;
// a repeated signal of either kind aborts immediately.
let shuttingDown = false;

/**
 * Shared signal handler: run registered cleanup exactly once, then exit
 * with the conventional 128 + signal-number code. A second signal while
 * cleanup is still in flight exits immediately with code 1.
 *
 * Fixes the original handlers, where SIGTERM neither checked nor set
 * `shuttingDown`, so a second SIGTERM (or SIGINT during SIGTERM cleanup)
 * ran the cleanup callbacks a second time.
 *
 * @param {string} signal - Signal name, used in the log line.
 * @param {number} exitCode - Exit code to use after cleanup completes.
 */
async function shutdownOnSignal(signal, exitCode) {
  if (shuttingDown) {
    process.exit(1);
  }
  shuttingDown = true;
  console.error(`\nReceived ${signal}, cleaning up...`);
  await runCleanup();
  process.exit(exitCode);
}

// 130 = 128 + SIGINT(2), 143 = 128 + SIGTERM(15).
process.on("SIGINT", () => {
  // fire-and-forget: the handler terminates the process itself
  void shutdownOnSignal("SIGINT", 130);
});
process.on("SIGTERM", () => {
  void shutdownOnSignal("SIGTERM", 143);
});
|
|
58
|
+
|
|
59
|
+
/**
 * CLI entry point: parse argv, honor --help, otherwise dispatch to the
 * handler registered in COMMANDS for the requested sub-command.
 * @throws {Error} When the sub-command has no registered handler.
 */
async function main() {
  const config = parseCliArgs(process.argv.slice(2));

  if (config.help) {
    console.log(buildHelpText());
  } else {
    const handler = COMMANDS[config.command];
    if (handler === undefined) {
      throw new Error(`Unknown command: ${config.command}`);
    }
    await handler(config);
  }
}

// On failure, report the stack (falling back to the bare message) and set a
// non-zero exit code without calling process.exit, so pending writes flush.
main().catch((error) => {
  console.error(error.stack || error.message);
  process.exitCode = 1;
});
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import fs from "node:fs/promises";
|
|
3
|
+
import { safeJsonParse, writeText } from "./util.js";
|
|
4
|
+
|
|
5
|
+
// Case-sensitive substrings searched for in captured URLs, request/response
// bodies, and saved bundle sources to flag workflow-related traffic.
// NOTE(review): matching is plain String.includes, so short entries such as
// "run" and "node" can hit unrelated text — confirm that noise is acceptable.
const KEYWORDS = [
  "workflow",
  "graph",
  "node",
  "edge",
  "save",
  "run",
  "debug",
  "intent",
  "llm",
  "model",
  "dataCenter",
  "publish",
];
|
|
19
|
+
|
|
20
|
+
/**
 * Classify how a captured request delivered its response.
 * @param {object} request - Captured network record; consults
 *   responseHeaders, protocol, responseBody and method.
 * @returns {"SSE" | "Streaming HTTP chunks" | "WebSocket" | null}
 *   Transport label, or null when no streaming shape was detected.
 */
function summarizeStream(request) {
  // Response header names are matched case-insensitively.
  let contentType;
  for (const [rawKey, value] of Object.entries(request.responseHeaders ?? {})) {
    if (String(rawKey).toLowerCase() === "content-type") {
      contentType = value;
      break;
    }
  }

  if (String(contentType).includes("text/event-stream")) {
    return "SSE";
  }

  // Heuristic: a blank line inside an HTTP response body suggests chunked
  // streaming output rather than a single JSON document.
  const overHttp = request.protocol?.toLowerCase().includes("http") ?? false;
  const body = request.responseBody;
  if (overHttp && typeof body === "string" && body.includes("\n\n")) {
    return "Streaming HTTP chunks";
  }

  return request.method === "WEBSOCKET" ? "WebSocket" : null;
}
|
|
44
|
+
|
|
45
|
+
/**
 * Pull the workflow-graph object out of the various envelope shapes the
 * platform uses: schemaJson strings at several nesting levels, a flat
 * {nodes, edges} object, graph/workflowGraph wrappers, or a nested data
 * envelope (handled recursively).
 * @param {unknown} payload - Parsed request or response payload.
 * @returns {object | null} The graph-like object, or null when none found.
 */
function unwrapSchemaContainer(payload) {
  if (payload === null || typeof payload !== "object") {
    return null;
  }

  // schemaJson may arrive as a JSON string in several envelope positions;
  // return the first candidate that parses to an object.
  const schemaJsonCandidates = [
    payload.params?.schemaJson,
    payload.schemaJson,
    payload.result?.schemaJson,
    payload.result?.data?.schemaJson,
  ];
  for (const candidate of schemaJsonCandidates) {
    const parsed = safeJsonParse(candidate);
    if (parsed !== null && typeof parsed === "object") {
      return parsed;
    }
  }

  const { nodes, edges, graph, workflowGraph, data } = payload;

  // A payload that already carries nodes/edges IS the graph.
  if (Array.isArray(nodes) || Array.isArray(edges)) {
    return payload;
  }

  if (graph !== null && typeof graph === "object" && graph !== undefined) {
    return graph;
  }

  if (workflowGraph !== null && typeof workflowGraph === "object") {
    return workflowGraph;
  }

  // Generic data envelope: recurse one level down.
  if (data !== null && typeof data === "object" && data !== undefined) {
    return unwrapSchemaContainer(data);
  }

  return null;
}
|
|
82
|
+
|
|
83
|
+
/**
 * JSON-ish type label for a value: distinguishes "array" and "null" from
 * the bare typeof result.
 * @param {unknown} value
 * @returns {string}
 */
function typeName(value) {
  if (Array.isArray(value)) {
    return "array";
  }
  return value === null ? "null" : typeof value;
}

/**
 * Map each own enumerable key of a plain object to the type label of its
 * value. Anything that is not a plain object (including arrays) yields an
 * empty record.
 * @param {unknown} record
 * @returns {Record<string, string>}
 */
function summarizeFieldTypes(record) {
  if (record === null || typeof record !== "object" || Array.isArray(record)) {
    return {};
  }

  const summary = {};
  for (const [key, value] of Object.entries(record)) {
    summary[key] = typeName(value);
  }
  return summary;
}
|
|
104
|
+
|
|
105
|
+
/**
 * Build a shape-only summary of the workflow graph carried by a captured
 * request: node/edge counts, per-field typeof labels for the first few
 * nodes and edges, distinct node types, one raw edge sample, and the
 * detected streaming transport. Field values are reduced to typeof labels
 * so the summary never copies payload content (except edgeSample).
 *
 * @param {object} request - Captured network record; requestBody (a JSON
 *   string) is preferred, responseBodyJson is the fallback payload.
 * @returns {object | null} Summary object, or null when the request
 *   carries no JSON object payload.
 */
export function summarizeWorkflowSchema(request) {
  const payload =
    safeJsonParse(request.requestBody) ??
    request.responseBodyJson ??
    null;

  if (!payload || typeof payload !== "object") {
    return null;
  }

  // The graph may sit inside several envelope shapes; fall back to the
  // payload itself so a flat {nodes, edges} payload still summarizes.
  const graph = unwrapSchemaContainer(payload) ?? payload.canvas ?? payload;

  // Guard with Array.isArray consistently. The original nodeTypes line used
  // `graph.nodes ?? []`, which throws a TypeError when graph.nodes is a
  // truthy non-array value.
  const rawNodes = Array.isArray(graph.nodes) ? graph.nodes : [];
  const rawEdges = Array.isArray(graph.edges) ? graph.edges : [];

  // Record field *types* only, never the node values themselves.
  const nodes = rawNodes.map((node) => ({
    id: typeof node.id,
    type: typeof node.type,
    position: typeof (node.position ?? node.meta?.position),
    data: typeof node.data,
  }));

  const edges = rawEdges.map((edge) => summarizeFieldTypes(edge));

  return {
    // The workflow id appears in different envelope positions per endpoint.
    workflowId:
      payload.workflowId ??
      payload.params?.workflowId ??
      payload.id ??
      payload.data?.workflowId ??
      null,
    nodeCount: nodes.length,
    edgeCount: edges.length,
    nodeTypes: [...new Set(rawNodes.map((node) => node.type).filter(Boolean))],
    nodes: nodes.slice(0, 10),
    edges: edges.slice(0, 10),
    // One raw edge kept verbatim so edge wiring conventions can be inspected.
    edgeSample: rawEdges[0] ?? null,
    streamType: summarizeStream(request),
  };
}
|
|
146
|
+
|
|
147
|
+
/**
 * Scan every captured request across all recorded steps for workflow-related
 * keywords appearing in the URL or in the request/response bodies.
 * @param {Array<{step: {id: *, title: string}, network: Array<object>}>} stepNetworks
 *   Per-step capture records.
 * @returns {Array<{stepId: *, stepTitle: string, method: *, url: string, matches: string[]}>}
 *   One finding per request with at least one keyword hit.
 */
export function buildStaticAnalysis(stepNetworks) {
  const findings = [];

  for (const { step, network } of stepNetworks) {
    for (const request of network) {
      const url = request.url ?? "";
      // Search request and response bodies together as one haystack.
      const haystack = `${request.requestBody ?? ""}\n${request.responseBody ?? ""}`;
      const matches = KEYWORDS.filter(
        (keyword) => url.includes(keyword) || haystack.includes(keyword),
      );

      if (matches.length > 0) {
        findings.push({
          stepId: step.id,
          stepTitle: step.title,
          method: request.method,
          url,
          matches,
        });
      }
    }
  }

  return findings;
}
|
|
174
|
+
|
|
175
|
+
/**
 * Render the keyword findings as `static-analysis.md` under outputDir and,
 * when a `bundles/` directory exists there, append a per-bundle list of
 * keyword hits found in the saved script bodies.
 * @param {string} outputDir - Directory that may contain bundles/ and that
 *   receives the markdown report.
 * @param {Array<object>} stepNetworks - Captured per-step network data, as
 *   consumed by buildStaticAnalysis.
 */
export async function writeStaticAnalysis(outputDir, stepNetworks) {
  const findings = buildStaticAnalysis(stepNetworks);
  const lines = ["# Static Analysis", ""];

  if (findings.length === 0) {
    lines.push("No keyword hits were found in captured request/response bodies.");
  } else {
    for (const { stepTitle, method, url, matches } of findings) {
      lines.push(
        `## ${stepTitle}`,
        `- Method: ${method}`,
        `- URL: ${url}`,
        `- Keyword hits: ${matches.join(", ")}`,
        "",
      );
    }
  }

  const bundleDir = path.join(outputDir, "bundles");
  try {
    const bundleFiles = await fs.readdir(bundleDir);
    if (bundleFiles.length > 0) {
      lines.push("## Saved Bundles");
      for (const bundleFile of bundleFiles) {
        const content = await fs.readFile(path.join(bundleDir, bundleFile), "utf8");
        const matches = KEYWORDS.filter((keyword) => content.includes(keyword));
        if (matches.length > 0) {
          lines.push(`- ${bundleFile}: ${matches.join(", ")}`);
        }
      }
    }
  } catch {
    // bundles/ may legitimately be absent; skip the section.
    // NOTE(review): this also swallows readFile errors mid-loop, truncating
    // the section silently — confirm that best-effort behavior is intended.
  }

  await writeText(path.join(outputDir, "static-analysis.md"), `${lines.join("\n")}\n`);
}
|