@probelabs/probe 0.6.0-rc231 → 0.6.0-rc233
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/binaries/probe-v0.6.0-rc233-aarch64-apple-darwin.tar.gz +0 -0
- package/bin/binaries/probe-v0.6.0-rc233-aarch64-unknown-linux-musl.tar.gz +0 -0
- package/bin/binaries/probe-v0.6.0-rc233-x86_64-apple-darwin.tar.gz +0 -0
- package/bin/binaries/probe-v0.6.0-rc233-x86_64-pc-windows-msvc.zip +0 -0
- package/bin/binaries/probe-v0.6.0-rc233-x86_64-unknown-linux-musl.tar.gz +0 -0
- package/build/agent/ProbeAgent.d.ts +2 -0
- package/build/agent/ProbeAgent.js +105 -12
- package/build/agent/dsl/agent-test.mjs +341 -0
- package/build/agent/dsl/analyze-test.mjs +237 -0
- package/build/agent/dsl/diag-test.mjs +78 -0
- package/build/agent/dsl/environment.js +387 -0
- package/build/agent/dsl/manual-test.mjs +662 -0
- package/build/agent/dsl/output-buffer-test.mjs +124 -0
- package/build/agent/dsl/pipeline-direct-test.mjs +147 -0
- package/build/agent/dsl/pipeline-test.mjs +223 -0
- package/build/agent/dsl/runtime.js +206 -0
- package/build/agent/dsl/sandbox-experiment.mjs +309 -0
- package/build/agent/dsl/transformer.js +156 -0
- package/build/agent/dsl/trigger-test.mjs +159 -0
- package/build/agent/dsl/validator.js +183 -0
- package/build/agent/index.js +18776 -7675
- package/build/agent/probeTool.js +9 -0
- package/build/agent/tools.js +9 -1
- package/build/delegate.js +12 -6
- package/build/index.js +5 -0
- package/build/tools/common.js +7 -0
- package/build/tools/executePlan.js +761 -0
- package/build/tools/index.js +4 -0
- package/cjs/agent/ProbeAgent.cjs +12891 -1797
- package/cjs/index.cjs +12395 -1292
- package/package.json +5 -1
- package/src/agent/ProbeAgent.d.ts +2 -0
- package/src/agent/ProbeAgent.js +105 -12
- package/src/agent/dsl/agent-test.mjs +341 -0
- package/src/agent/dsl/analyze-test.mjs +237 -0
- package/src/agent/dsl/diag-test.mjs +78 -0
- package/src/agent/dsl/environment.js +387 -0
- package/src/agent/dsl/manual-test.mjs +662 -0
- package/src/agent/dsl/output-buffer-test.mjs +124 -0
- package/src/agent/dsl/pipeline-direct-test.mjs +147 -0
- package/src/agent/dsl/pipeline-test.mjs +223 -0
- package/src/agent/dsl/runtime.js +206 -0
- package/src/agent/dsl/sandbox-experiment.mjs +309 -0
- package/src/agent/dsl/transformer.js +156 -0
- package/src/agent/dsl/trigger-test.mjs +159 -0
- package/src/agent/dsl/validator.js +183 -0
- package/src/agent/index.js +8 -0
- package/src/agent/probeTool.js +9 -0
- package/src/agent/tools.js +9 -1
- package/src/delegate.js +12 -6
- package/src/index.js +5 -0
- package/src/tools/common.js +7 -0
- package/src/tools/executePlan.js +761 -0
- package/src/tools/index.js +4 -0
- package/bin/binaries/probe-v0.6.0-rc231-aarch64-apple-darwin.tar.gz +0 -0
- package/bin/binaries/probe-v0.6.0-rc231-aarch64-unknown-linux-musl.tar.gz +0 -0
- package/bin/binaries/probe-v0.6.0-rc231-x86_64-apple-darwin.tar.gz +0 -0
- package/bin/binaries/probe-v0.6.0-rc231-x86_64-pc-windows-msvc.zip +0 -0
- package/bin/binaries/probe-v0.6.0-rc231-x86_64-unknown-linux-musl.tar.gz +0 -0
|
@@ -0,0 +1,761 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* execute_plan tool - DSL-based programmatic orchestration.
|
|
3
|
+
*
|
|
4
|
+
* Allows the LLM to write small JavaScript programs that orchestrate
|
|
5
|
+
* tool calls, keeping intermediate data out of the agent's context window.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { tool } from 'ai';
|
|
9
|
+
import { executePlanSchema, parseAndResolvePaths } from './common.js';
|
|
10
|
+
import { createDSLRuntime } from '../agent/dsl/runtime.js';
|
|
11
|
+
import { search } from '../search.js';
|
|
12
|
+
import { query } from '../query.js';
|
|
13
|
+
import { extract } from '../extract.js';
|
|
14
|
+
import { delegate } from '../delegate.js';
|
|
15
|
+
import { glob } from 'glob';
|
|
16
|
+
|
|
17
|
+
export { executePlanSchema };
|
|
18
|
+
|
|
19
|
+
/**
 * Remove markdown code fences and XML-style wrapper tags that LLMs
 * sometimes add around generated code.
 *
 * @param {*} code - Raw code text (coerced to string; null/undefined become '').
 * @returns {string} The unwrapped, trimmed code.
 */
function stripCodeWrapping(code) {
  const raw = String(code || '');
  // Opening fences (``` / ```js / ```javascript at line start) and closing fences at line end.
  const withoutFences = raw
    .replace(/^```(?:javascript|js)?\n?/gm, '')
    .replace(/```$/gm, '');
  // <execute_plan> / <code> wrapper tags, opening or closing.
  return withoutFences.replace(/<\/?(?:execute_plan|code)>/g, '').trim();
}
|
|
30
|
+
|
|
31
|
+
/**
 * Build DSL-compatible tool implementations from the agent's configOptions.
 *
 * Each tool exposes an async `execute(params)` that never throws: failures
 * are converted to "<Tool> error: ..." strings so DSL scripts can branch on them.
 *
 * @param {Object} configOptions - Agent config (sessionId, cwd, provider, model, etc.)
 * @returns {Object} toolImplementations for createDSLRuntime
 */
function buildToolImplementations(configOptions) {
  const { sessionId, cwd } = configOptions;

  // Keyword search over the codebase. Falls back to cwd when no path is given.
  const runSearch = async (params) => {
    try {
      let paths;
      if (params.path) {
        paths = parseAndResolvePaths(params.path, cwd);
      }
      if (!paths || paths.length === 0) {
        paths = [cwd || '.'];
      }
      return await search({
        query: params.query,
        path: paths.join(' '),
        cwd,
        allowTests: true,
        exact: params.exact || false,
        json: false,
        maxTokens: 20000,
        session: sessionId,
        timeout: 60,
      });
    } catch (e) {
      return `Search error: ${e.message}`;
    }
  };

  // AST (tree-sitter) pattern query; only the first resolved path is used.
  const runQuery = async (params) => {
    try {
      let targetPath = cwd || '.';
      if (params.path) {
        const resolved = parseAndResolvePaths(params.path, cwd);
        if (resolved.length > 0) targetPath = resolved[0];
      }
      return await query({
        pattern: params.pattern,
        path: targetPath,
        cwd,
        language: params.language || 'rust',
        allowTests: params.allow_tests ?? true,
      });
    } catch (e) {
      return `Query error: ${e.message}`;
    }
  };

  // Read file contents (or process inline content); guards against missing input.
  const runExtract = async (params) => {
    try {
      if (!params.targets && !params.input_content) {
        return 'Extract error: no file path provided. Usage: extract("path/to/file.md")';
      }
      return await extract({
        files: params.targets ? [params.targets] : undefined,
        content: params.input_content || undefined,
        cwd,
        allowTests: params.allow_tests ?? true,
      });
    } catch (e) {
      return `Extract error: ${e.message}`;
    }
  };

  // Glob for files under cwd, skipping node_modules and .git; sorted for determinism.
  const runListFiles = async (params) => {
    try {
      const matches = await glob(params.pattern || '**/*', {
        cwd: cwd || '.',
        ignore: ['node_modules/**', '.git/**'],
        nodir: true,
      });
      matches.sort();
      return matches;
    } catch (e) {
      return `listFiles error: ${e.message}`;
    }
  };

  return {
    search: { execute: runSearch },
    query: { execute: runQuery },
    extract: { execute: runExtract },
    listFiles: { execute: runListFiles },
  };
}
|
|
125
|
+
|
|
126
|
+
/**
 * Build an llmCall function using delegate with disableTools.
 *
 * Uses the full delegate infrastructure (OTEL, retries, fallbacks, schema support)
 * but with tools disabled and maxIterations: 1 since LLM() is pure text processing.
 *
 * @param {Object} configOptions - Agent config
 * @returns {Function} llmCall(instruction, data, options?) => Promise<string>
 */
function buildLLMCall(configOptions) {
  const { provider, model, debug, tracer, sessionId } = configOptions;

  return async (instruction, data, options = {}) => {
    // Serialize the payload: strings pass through, objects become pretty JSON,
    // null/undefined collapse to the empty string.
    let payload;
    if (data == null) {
      payload = '';
    } else if (typeof data === 'string') {
      payload = data;
    } else {
      payload = JSON.stringify(data, null, 2);
    }

    // NOTE(review): some callers pass options.maxTokens / options.temperature,
    // which are not forwarded to delegate() here — confirm whether delegate
    // should receive them.
    return delegate({
      task: `${instruction}\n\n---\n\n${payload || '(empty)'}`,
      disableTools: true,
      maxIterations: 1,
      provider,
      model,
      debug,
      tracer,
      parentSessionId: sessionId,
      schema: options.schema || null,
      timeout: options.timeout || 120,
    });
  };
}
|
|
156
|
+
|
|
157
|
+
/**
 * Create the execute_plan tool for the Vercel AI SDK.
 *
 * Accepts EITHER:
 * - Agent configOptions (sessionId, cwd, provider, model, etc.) — auto-builds tools + LLM via delegate
 * - Direct DSL options (toolImplementations, llmCall, etc.) — used as-is (tests, manual scripts)
 *
 * The returned tool runs DSL code with a self-heal retry loop: on failure the
 * code is sent back to the LLM with the error for a rewrite, up to maxRetries
 * times (so the script executes at most maxRetries + 1 times total).
 *
 * @param {Object} options
 * @returns {Object} Vercel AI SDK tool
 */
export function createExecutePlanTool(options) {
  let runtimeOptions;
  let llmCallFn;
  const tracer = options.tracer || null;

  // Session-scoped store persists across execute_plan calls within the same agent session
  const sessionStore = options.sessionStore || {};

  // Output buffer for direct-to-user content (bypasses LLM context window)
  const outputBuffer = options.outputBuffer || null;

  if (options.toolImplementations) {
    // Direct DSL options — used by tests and manual scripts
    runtimeOptions = { ...options, tracer, sessionStore, outputBuffer };
    llmCallFn = options.llmCall;
  } else {
    // Agent configOptions — build everything from the agent's config
    llmCallFn = buildLLMCall(options);
    runtimeOptions = {
      toolImplementations: buildToolImplementations(options),
      llmCall: llmCallFn,
      mcpBridge: options.mcpBridge || null,
      mcpTools: options.mcpTools || {},
      mapConcurrency: options.mapConcurrency || 5,
      timeoutMs: options.timeoutMs || 300000,
      maxLoopIterations: options.maxLoopIterations || 5000,
      tracer,
      sessionStore,
      outputBuffer,
    };
  }

  const runtime = createDSLRuntime(runtimeOptions);
  // Number of self-heal retries AFTER the first execution attempt.
  const maxRetries = options.maxRetries ?? 2;

  return tool({
    description: 'Execute a JavaScript DSL program to orchestrate tool calls. ' +
      'Use for batch processing, paginated APIs, multi-step workflows where intermediate data is large. ' +
      'Write simple synchronous-looking code — do NOT use async/await.',
    parameters: executePlanSchema,
    execute: async ({ code, description }) => {
      // Create top-level OTEL span for the entire execute_plan invocation.
      // All tracer calls below use ?. so tracing is fully optional.
      const planSpan = tracer?.createToolSpan?.('execute_plan', {
        'dsl.description': description || '',
        'dsl.code_length': code.length,
        'dsl.code': code,
        'dsl.max_retries': maxRetries,
      }) || null;

      // Strip XML tags and markdown fences LLMs sometimes wrap code in
      let currentCode = stripCodeWrapping(code);
      let lastError = null;
      let finalOutput;

      try {
        for (let attempt = 0; attempt <= maxRetries; attempt++) {
          // On retry, ask the LLM to fix the code
          if (attempt > 0 && llmCallFn && lastError) {
            planSpan?.addEvent?.('dsl.self_heal_start', {
              'dsl.attempt': attempt,
              'dsl.error': lastError.substring(0, 1000),
            });

            try {
              const fixPrompt = `The following DSL script failed with an error. Fix the script and return ONLY the corrected JavaScript code — no markdown, no explanation, no backtick fences.

ORIGINAL SCRIPT:
${currentCode}

ERROR:
${lastError}

RULES REMINDER:
- search(query) is KEYWORD SEARCH — pass a search query, NOT a filename. Use extract(filepath) to read file contents.
- search(), query(), extract(), listFiles(), bash() all return STRINGS, not arrays.
- Use chunk(stringData) to split a string into an array of chunks.
- Use map(array, fn) only with arrays. Do NOT pass strings to map().
- Do NOT use .map(), .forEach(), .filter(), .join() — use for..of loops instead.
- Do NOT define helper functions that call tools — write logic inline.
- Do NOT use async/await, template literals, or shorthand properties.
- Do NOT use regex literals (/pattern/) — use String methods like indexOf, includes, startsWith instead.
- String concatenation with +, not template literals.`;

              // NOTE(review): maxTokens/temperature are only honored if the
              // supplied llmCall supports them — buildLLMCall currently drops
              // them; confirm against delegate()'s accepted options.
              const fixedCode = await llmCallFn(fixPrompt, '', { maxTokens: 4000, temperature: 0.2 });
              // Strip markdown fences and XML tags the LLM might add
              currentCode = stripCodeWrapping(fixedCode);

              planSpan?.addEvent?.('dsl.self_heal_complete', {
                'dsl.attempt': attempt,
                'dsl.fixed_code_length': currentCode.length,
              });

              // An empty fix cannot be executed; bail out rather than re-running stale code.
              if (!currentCode) {
                finalOutput = `Plan execution failed after ${attempt} retries: LLM returned empty fix.\n\nLast error: ${lastError}`;
                planSpan?.setAttributes?.({ 'dsl.result': 'empty_fix', 'dsl.attempts': attempt });
                planSpan?.setStatus?.('ERROR');
                planSpan?.end?.();
                return finalOutput;
              }
            } catch (fixError) {
              // The self-heal LLM call itself failed — report both errors to the agent.
              finalOutput = `Plan execution failed and self-heal failed: ${fixError.message}\n\nOriginal error: ${lastError}`;
              planSpan?.setAttributes?.({ 'dsl.result': 'self_heal_error', 'dsl.attempts': attempt });
              planSpan?.setStatus?.('ERROR');
              planSpan?.end?.();
              return finalOutput;
            }
          }

          const result = await runtime.execute(currentCode, description);

          if (result.status === 'success') {
            finalOutput = formatSuccess(result, description, attempt, outputBuffer);
            planSpan?.setAttributes?.({
              'dsl.result': 'success',
              'dsl.attempts': attempt,
              'dsl.self_healed': attempt > 0,
              'dsl.result_length': finalOutput.length,
              'dsl.log_count': result.logs.length,
            });
            planSpan?.setStatus?.('OK');
            planSpan?.end?.();
            return finalOutput;
          }

          // Execution failed — prepare for retry
          // Logs are folded into lastError so the self-heal prompt sees them too.
          const logOutput = result.logs.length > 0 ? `\nLogs: ${result.logs.join(' | ')}` : '';
          lastError = `${result.error}${logOutput}`;

          planSpan?.addEvent?.('dsl.execution_failed', {
            'dsl.attempt': attempt,
            'dsl.error': lastError.substring(0, 1000),
          });
        }

        // All retries exhausted
        finalOutput = `Plan execution failed after ${maxRetries} retries.\n\nLast error: ${lastError}`;
        planSpan?.setAttributes?.({
          'dsl.result': 'all_retries_exhausted',
          'dsl.attempts': maxRetries,
          'dsl.last_error': lastError?.substring(0, 1000),
        });
        planSpan?.setStatus?.('ERROR');
        planSpan?.end?.();
        return finalOutput;
      } catch (e) {
        // Unexpected (non-DSL) failure: record it on the span and rethrow so
        // the AI SDK surfaces the error instead of a formatted string.
        planSpan?.setStatus?.('ERROR');
        planSpan?.addEvent?.('exception', {
          'exception.message': e.message,
          'exception.stack': e.stack,
        });
        planSpan?.end?.();
        throw e;
      }
    },
  });
}
|
|
323
|
+
|
|
324
|
+
/**
 * Format a successful DSL run into the tool-result string shown to the agent.
 *
 * @param {Object} result - Runtime result ({ result, logs }).
 * @param {string} description - Optional human-readable plan description.
 * @param {number} attempt - Retry index that succeeded (0 = first try).
 * @param {Object|null} outputBuffer - Direct-output buffer ({ items }) or null.
 * @returns {string} Formatted summary.
 */
function formatSuccess(result, description, attempt, outputBuffer) {
  const parts = [];

  if (description) {
    parts.push(`Plan: ${description}\n\n`);
  }

  if (attempt > 0) {
    const noun = attempt === 1 ? 'retry' : 'retries';
    parts.push(`(Self-healed after ${attempt} ${noun})\n\n`);
  }

  // Only user-facing log lines; runtime/output bookkeeping lines are hidden.
  const userLogs = result.logs.filter(
    (line) => !line.startsWith('[runtime]') && !line.startsWith('[output]')
  );
  if (userLogs.length > 0) {
    parts.push(`Logs:\n${userLogs.join('\n')}\n\n`);
  }

  // Render the plan's return value.
  const value = result.result;
  if (value == null) {
    parts.push('Plan completed (no return value).');
  } else if (typeof value === 'string') {
    parts.push(`Result:\n${value}`);
  } else {
    let rendered;
    try {
      rendered = `Result:\n${JSON.stringify(value, null, 2)}`;
    } catch {
      // Circular structures etc. fall back to a plain String() rendering.
      rendered = `Result: ${String(value)}`;
    }
    parts.push(rendered);
  }

  // If output buffer has content, tell the LLM the data was written to direct output.
  if (outputBuffer?.items?.length > 0) {
    let totalChars = 0;
    for (const item of outputBuffer.items) {
      totalChars += item.length;
    }
    parts.push(`\n\n[Output buffer: ${totalChars} chars written via output(). This content will be appended directly to your response. Do NOT repeat or summarize it.]`);
  }

  return parts.join('');
}
|
|
364
|
+
|
|
365
|
+
/**
|
|
366
|
+
* XML tool definition for the system prompt.
|
|
367
|
+
*
|
|
368
|
+
* @param {string[]} availableFunctions - List of available DSL function names
|
|
369
|
+
* @returns {string} Tool definition text
|
|
370
|
+
*/
|
|
371
|
+
export function getExecutePlanToolDefinition(availableFunctions = []) {
|
|
372
|
+
const funcList = availableFunctions.length > 0
|
|
373
|
+
? availableFunctions.join(', ')
|
|
374
|
+
: 'search, query, extract, LLM, map, chunk, batch, listFiles, bash, log, range, flatten, unique, groupBy, parseJSON, storeSet, storeGet, storeAppend, storeKeys, storeGetAll, output';
|
|
375
|
+
|
|
376
|
+
return `## execute_plan
|
|
377
|
+
Description: Execute a JavaScript DSL program to orchestrate tool calls. Use for batch processing, large data analysis, and multi-step workflows where intermediate data is large.
|
|
378
|
+
|
|
379
|
+
ALWAYS use this tool when:
|
|
380
|
+
- The question asks about "all", "every", "comprehensive", "complete", or "inventory" of something
|
|
381
|
+
- The question covers multiple topics or requires scanning across the full codebase
|
|
382
|
+
- Open-ended discovery questions where you don't know the right search keywords (use the discovery-first pattern)
|
|
383
|
+
- Processing large search results that exceed context limits
|
|
384
|
+
- Iterating over paginated APIs or many files
|
|
385
|
+
- Batch operations with the same logic applied to many items
|
|
386
|
+
- Chaining multiple tool calls where intermediate data is large
|
|
387
|
+
|
|
388
|
+
Do NOT use this tool for:
|
|
389
|
+
- Simple single searches or extractions (1-2 tool calls)
|
|
390
|
+
- Questions about a specific function, class, or file
|
|
391
|
+
- Tasks where you need to see and reason about every detail of results
|
|
392
|
+
|
|
393
|
+
Parameters:
|
|
394
|
+
- code: (required) JavaScript DSL code to execute. Write synchronous-looking code — do NOT use async/await.
|
|
395
|
+
- description: (optional) Human-readable description of what this plan does.
|
|
396
|
+
|
|
397
|
+
<examples>
|
|
398
|
+
|
|
399
|
+
Discovery-first analysis (RECOMMENDED for open-ended questions — explore before searching):
|
|
400
|
+
<execute_plan>
|
|
401
|
+
<code>
|
|
402
|
+
const files = listFiles("**/*");
|
|
403
|
+
const sample = search("initial keyword");
|
|
404
|
+
const plan = LLM(
|
|
405
|
+
"Based on this repo structure and sample results, suggest the best search strategy. " +
|
|
406
|
+
"Return JSON: {keywords: [2-4 queries], extractionFocus: string, aggregation: string}. ONLY valid JSON.",
|
|
407
|
+
"Files:\\n" + String(files).substring(0, 3000) + "\\nSample:\\n" + String(sample).substring(0, 3000)
|
|
408
|
+
);
|
|
409
|
+
const strategy = parseJSON(plan);
|
|
410
|
+
log("Strategy: " + strategy.keywords.length + " keywords");
|
|
411
|
+
const allFindings = [];
|
|
412
|
+
for (const kw of strategy.keywords) {
|
|
413
|
+
const results = search(kw);
|
|
414
|
+
if (String(results).length > 500) {
|
|
415
|
+
const chunks = chunk(results);
|
|
416
|
+
const findings = map(chunks, (c) => LLM(strategy.extractionFocus, c));
|
|
417
|
+
for (const f of findings) { allFindings.push(String(f)); }
|
|
418
|
+
}
|
|
419
|
+
}
|
|
420
|
+
var combined = "";
|
|
421
|
+
for (const f of allFindings) { combined = combined + f + "\\n---\\n"; }
|
|
422
|
+
return LLM("Synthesize all findings into a comprehensive answer.", combined);
|
|
423
|
+
</code>
|
|
424
|
+
<description>Discover optimal search strategy, then analyze</description>
|
|
425
|
+
</execute_plan>
|
|
426
|
+
|
|
427
|
+
Analyze large search results:
|
|
428
|
+
<execute_plan>
|
|
429
|
+
<code>
|
|
430
|
+
const results = search("error handling");
|
|
431
|
+
const chunks = chunk(results);
|
|
432
|
+
log("Processing " + chunks.length + " chunks");
|
|
433
|
+
const extracted = map(chunks, (c) => LLM("List error handling patterns found. Be brief.", c));
|
|
434
|
+
var combined = "";
|
|
435
|
+
for (const e of extracted) { combined = combined + String(e) + "\\n---\\n"; }
|
|
436
|
+
return LLM("Combine into a summary.", combined);
|
|
437
|
+
</code>
|
|
438
|
+
<description>Analyze error handling patterns across the codebase</description>
|
|
439
|
+
</execute_plan>
|
|
440
|
+
|
|
441
|
+
Multi-topic analysis:
|
|
442
|
+
<execute_plan>
|
|
443
|
+
<code>
|
|
444
|
+
const topics = ["authentication", "authorization"];
|
|
445
|
+
const allFindings = [];
|
|
446
|
+
for (const topic of topics) {
|
|
447
|
+
const results = search(topic);
|
|
448
|
+
const chunks = chunk(results);
|
|
449
|
+
const findings = map(chunks, (c) => LLM("Extract key findings about " + topic + ". Be brief.", c));
|
|
450
|
+
for (const f of findings) { allFindings.push(String(f)); }
|
|
451
|
+
}
|
|
452
|
+
var combined = "";
|
|
453
|
+
for (const f of allFindings) { combined = combined + f + "\\n---\\n"; }
|
|
454
|
+
return LLM("Synthesize all findings into a report.", combined);
|
|
455
|
+
</code>
|
|
456
|
+
<description>Cross-topic analysis of auth patterns</description>
|
|
457
|
+
</execute_plan>
|
|
458
|
+
|
|
459
|
+
Process each file individually (use extract to read files, NOT search):
|
|
460
|
+
<execute_plan>
|
|
461
|
+
<code>
|
|
462
|
+
const files = listFiles("**/*.md");
|
|
463
|
+
log("Found " + files.length + " files");
|
|
464
|
+
const batches = batch(files, 5);
|
|
465
|
+
const results = [];
|
|
466
|
+
for (const b of batches) {
|
|
467
|
+
const batchResults = map(b, (filepath) => {
|
|
468
|
+
try {
|
|
469
|
+
const content = extract(filepath);
|
|
470
|
+
if (String(content).length > 100) {
|
|
471
|
+
const info = LLM("Extract: customer name, industry, key use case. Return JSON: {customer, industry, useCase}. ONLY JSON.", content);
|
|
472
|
+
try { return parseJSON(info); } catch (e) { return null; }
|
|
473
|
+
}
|
|
474
|
+
} catch (e) { return null; }
|
|
475
|
+
return null;
|
|
476
|
+
});
|
|
477
|
+
for (const r of batchResults) { if (r) { results.push(r); } }
|
|
478
|
+
log("Batch done, total: " + results.length);
|
|
479
|
+
}
|
|
480
|
+
var table = "| Customer | Industry | Use Case |";
|
|
481
|
+
for (const r of results) {
|
|
482
|
+
table = table + "\n| " + r.customer + " | " + r.industry + " | " + r.useCase + " |";
|
|
483
|
+
}
|
|
484
|
+
return table;
|
|
485
|
+
</code>
|
|
486
|
+
<description>Read each file with extract() and classify with LLM</description>
|
|
487
|
+
</execute_plan>
|
|
488
|
+
|
|
489
|
+
</examples>
|
|
490
|
+
|
|
491
|
+
### Rules
|
|
492
|
+
- Write simple, synchronous-looking JavaScript. Do NOT use async/await — the runtime injects it automatically.
|
|
493
|
+
- Do NOT use: class, new, import, require, eval, this, Promise, async, await, setTimeout.
|
|
494
|
+
- Do NOT use these as variable names: eval, Function, require, process, globalThis, constructor, prototype, exports, Proxy, Reflect, Symbol.
|
|
495
|
+
- Use \`map(items, fn)\` for **parallel** batch processing. Use \`for..of\` only for **sequential** logic where order matters.
|
|
496
|
+
- **CRITICAL: When processing multiple files**, use \`batch(files, 5)\` + \`map(batch, fn)\` for parallel processing. NEVER use a sequential for..of loop with LLM() or extract() calls on many files — it will timeout.
|
|
497
|
+
- Do NOT use Array.prototype.map (.map()) — use the global \`map()\` function instead.
|
|
498
|
+
- Use \`LLM(instruction, data)\` for AI processing — returns a string.
|
|
499
|
+
- Use \`log(message)\` for debugging — messages appear in the output.
|
|
500
|
+
- Use \`parseJSON(text)\` instead of \`JSON.parse()\` when parsing LLM output — LLM responses often have markdown fences.
|
|
501
|
+
- Tool functions never throw — on error they return an \`"ERROR: ..."\` string. Check with \`if (result.indexOf("ERROR:") === 0)\` to handle errors.
|
|
502
|
+
- Always use explicit property assignment: \`{ key: value }\` not shorthand \`{ key }\`.
|
|
503
|
+
- String concatenation with \`+\`, no template literals with backticks.
|
|
504
|
+
- Use \`String(value)\` before calling \`.trim()\`, \`.split()\`, or \`.length\` on tool results.
|
|
505
|
+
- Use \`for (const item of array)\` loops instead of \`.forEach()\`, \`.map()\`, \`.filter()\`, or \`.join()\` array methods.
|
|
506
|
+
- Do NOT define helper functions that call tools. Write all logic inline or use for..of loops.
|
|
507
|
+
- Do NOT use regex literals (/pattern/) — use String methods like indexOf, includes, startsWith instead.
|
|
508
|
+
- ONLY use functions listed below. Do NOT call functions that are not listed.
|
|
509
|
+
|
|
510
|
+
### Available functions
|
|
511
|
+
|
|
512
|
+
**Tools (async, auto-awaited):**
|
|
513
|
+
${funcList}
|
|
514
|
+
|
|
515
|
+
**Return types — IMPORTANT:**
|
|
516
|
+
- \`search(query)\` → **keyword search** — pass a search query (e.g. "error handling"), NOT a filename. Returns a **string** (matching code snippets). To process parts, use \`chunk()\` to split it.
|
|
517
|
+
- \`query(pattern)\` → **AST search** — pass a tree-sitter pattern. Returns a **string** (matching code elements).
|
|
518
|
+
- \`extract(targets)\` → **read file contents** — pass a file path like "src/main.js" or "src/main.js:42". Use this to read specific files found by listFiles(). Returns a **string**.
|
|
519
|
+
- \`listFiles(pattern)\` → **list files** — pass a glob pattern like "**/*.md". Returns an **array** of file path strings. Use directly with \`for (const f of listFiles("**/*.md"))\`.
|
|
520
|
+
- \`LLM(instruction, data)\` → returns a **string** (AI response)
|
|
521
|
+
- \`map(array, fn)\` → returns an **array** of results. First argument MUST be an array.
|
|
522
|
+
- \`bash(command)\` → returns a **string** (command output)
|
|
523
|
+
|
|
524
|
+
**COMMON MISTAKE:** Do NOT use \`search(filename)\` to read a file's contents — search() is for keyword queries. Use \`extract(filepath)\` to read file contents.
|
|
525
|
+
|
|
526
|
+
**Parallel processing:**
|
|
527
|
+
- \`map(array, fn)\` — process array items **in parallel** (concurrency=3). Use this for batch operations, NOT for..of loops.
|
|
528
|
+
|
|
529
|
+
**Utilities (sync):**
|
|
530
|
+
- \`chunk(data, tokens)\` — split a string into token-sized array of chunks (default 20000 tokens). Returns an **array of strings**.
|
|
531
|
+
- \`batch(array, size)\` — split an array into sub-arrays of \`size\` (default 10). Returns an **array of arrays**.
|
|
532
|
+
- \`log(message)\` — log a message (collected in output)
|
|
533
|
+
- \`range(start, end)\` — generate array of integers [start, end)
|
|
534
|
+
- \`flatten(arr)\` — flatten one level of nesting
|
|
535
|
+
- \`unique(arr)\` — deduplicate array
|
|
536
|
+
- \`groupBy(arr, key)\` — group array of objects by key or function
|
|
537
|
+
- \`parseJSON(text)\` — **safely parse JSON from LLM responses**. Strips markdown fences and extracts JSON. ALWAYS use \`parseJSON()\` instead of \`JSON.parse()\` when parsing LLM output.
|
|
538
|
+
|
|
539
|
+
**Direct output (sync):**
|
|
540
|
+
- \`output(content)\` — **write content directly to the user's response**, bypassing LLM rewriting. Use for large tables, JSON, or CSV that should be delivered verbatim. Can be called multiple times; all content is appended to the final response. The \`return\` value still goes to the tool result for you to see.
|
|
541
|
+
|
|
542
|
+
**Session store (sync, persists across execute_plan calls):**
|
|
543
|
+
- \`storeSet(key, value)\` — store a value that persists across execute_plan calls in this session
|
|
544
|
+
- \`storeGet(key)\` — retrieve a stored value (returns undefined if not found)
|
|
545
|
+
- \`storeAppend(key, item)\` — append item to an array in the store (auto-creates array if key doesn't exist)
|
|
546
|
+
- \`storeKeys()\` — list all keys in the store
|
|
547
|
+
- \`storeGetAll()\` — return entire store as a plain object
|
|
548
|
+
|
|
549
|
+
### Patterns
|
|
550
|
+
|
|
551
|
+
**Pattern 1: Discovery-first (RECOMMENDED for open-ended questions)**
|
|
552
|
+
When you don't know the right keywords, explore the repo first, then use LLM to determine the best search strategy:
|
|
553
|
+
\`\`\`
|
|
554
|
+
// Phase 1: Discover repo structure and test queries
|
|
555
|
+
const files = listFiles("**/*");
|
|
556
|
+
const sample = search("initial keyword guess");
|
|
557
|
+
log("Files overview length: " + String(files).length + ", sample length: " + String(sample).length);
|
|
558
|
+
|
|
559
|
+
// Phase 2: Ask LLM to determine optimal strategy based on what exists
|
|
560
|
+
const plan = LLM(
|
|
561
|
+
"Based on this repository structure and sample search results, determine the best search strategy. " +
|
|
562
|
+
"Return a JSON object with: keywords (array of 2-4 search queries that will find relevant data), " +
|
|
563
|
+
"extractionFocus (what to extract from each result), " +
|
|
564
|
+
"and aggregation (summarize, list_unique, count, or group_by). " +
|
|
565
|
+
"IMPORTANT: Only suggest keywords likely to match actual content you see. Return ONLY valid JSON.",
|
|
566
|
+
"Repository files:\\n" + String(files).substring(0, 3000) + "\\nSample results:\\n" + String(sample).substring(0, 3000)
|
|
567
|
+
);
|
|
568
|
+
const strategy = parseJSON(plan);
|
|
569
|
+
log("Strategy: " + strategy.keywords.length + " keywords, focus: " + strategy.extractionFocus);
|
|
570
|
+
|
|
571
|
+
// Phase 3: Execute with discovered strategy
|
|
572
|
+
const allFindings = [];
|
|
573
|
+
for (const kw of strategy.keywords) {
|
|
574
|
+
const results = search(kw);
|
|
575
|
+
if (String(results).length > 500) {
|
|
576
|
+
const chunks = chunk(results);
|
|
577
|
+
const findings = map(chunks, (c) => LLM(strategy.extractionFocus, c));
|
|
578
|
+
for (const f of findings) { allFindings.push(String(f)); }
|
|
579
|
+
log("Keyword '" + kw + "': " + chunks.length + " chunks processed");
|
|
580
|
+
} else {
|
|
581
|
+
log("Keyword '" + kw + "': skipped (too few results)");
|
|
582
|
+
}
|
|
583
|
+
}
|
|
584
|
+
var combined = "";
|
|
585
|
+
for (const f of allFindings) { combined = combined + f + "\\n---\\n"; }
|
|
586
|
+
return LLM("Synthesize all findings into a comprehensive answer.", combined);
|
|
587
|
+
\`\`\`
|
|
588
|
+
|
|
589
|
+
**Pattern 2: Large result analysis**
|
|
590
|
+
search() returns a big string. Split into 20K-token chunks, process in parallel, synthesize:
|
|
591
|
+
\`\`\`
|
|
592
|
+
const results = search("error handling");
|
|
593
|
+
const chunks = chunk(results);
|
|
594
|
+
log("Processing " + chunks.length + " chunks");
|
|
595
|
+
const extracted = map(chunks, (c) => LLM("List error handling patterns found. Be brief.", c));
|
|
596
|
+
var combined = "";
|
|
597
|
+
for (const e of extracted) { combined = combined + String(e) + "\\n---\\n"; }
|
|
598
|
+
return LLM("Combine into a summary. Max 5 bullet points.", combined);
|
|
599
|
+
\`\`\`
|
|
600
|
+
|
|
601
|
+
**Pattern 3: Paginated API with while loop**
|
|
602
|
+
For APIs that return pages of results:
|
|
603
|
+
\`\`\`
|
|
604
|
+
const allItems = [];
|
|
605
|
+
let page = 1;
|
|
606
|
+
while (true) {
|
|
607
|
+
const result = mcp_api_list_items({ page: page, per_page: 50 });
|
|
608
|
+
for (const item of result.items) {
|
|
609
|
+
allItems.push(item);
|
|
610
|
+
}
|
|
611
|
+
log("Page " + page + ": " + result.items.length + " items");
|
|
612
|
+
if (!result.has_more) break;
|
|
613
|
+
page = page + 1;
|
|
614
|
+
}
|
|
615
|
+
return allItems;
|
|
616
|
+
\`\`\`
|
|
617
|
+
|
|
618
|
+
**Pattern 4: Batch classify/process with map**
|
|
619
|
+
For processing many items in parallel:
|
|
620
|
+
\`\`\`
|
|
621
|
+
const items = mcp_api_get_tickets({ status: "open" });
|
|
622
|
+
const classified = map(items, (item) => {
|
|
623
|
+
const sentiment = LLM("Classify as positive, negative, or neutral. Return ONLY the word.", item.description);
|
|
624
|
+
return { id: item.id, title: item.title, sentiment: String(sentiment).trim() };
|
|
625
|
+
});
|
|
626
|
+
return groupBy(classified, "sentiment");
|
|
627
|
+
\`\`\`
|
|
628
|
+
|
|
629
|
+
**Pattern 5: Multi-search with error handling**
|
|
630
|
+
For searching multiple topics and combining results:
|
|
631
|
+
\`\`\`
|
|
632
|
+
const queries = ["authentication", "authorization", "session management"];
|
|
633
|
+
const results = [];
|
|
634
|
+
for (const q of queries) {
|
|
635
|
+
try {
|
|
636
|
+
const r = search(q);
|
|
637
|
+
if (r.length > 500) {
|
|
638
|
+
const summary = LLM("Summarize the key patterns found. Be concise.", r);
|
|
639
|
+
results.push({ query: q, summary: summary });
|
|
640
|
+
} else {
|
|
641
|
+
results.push({ query: q, summary: "No significant results" });
|
|
642
|
+
}
|
|
643
|
+
} catch (e) {
|
|
644
|
+
results.push({ query: q, summary: "Search failed" });
|
|
645
|
+
}
|
|
646
|
+
}
|
|
647
|
+
return LLM("Combine these findings into a security overview.", results);
|
|
648
|
+
\`\`\`
|
|
649
|
+
|
|
650
|
+
**Pattern 6: Iterative deepening**
|
|
651
|
+
Search broadly, then drill into specific findings:
|
|
652
|
+
\`\`\`
|
|
653
|
+
const broad = search("database");
|
|
654
|
+
const keyFunctions = LLM("List the 3 most important function names. Return comma-separated, nothing else.", broad);
|
|
655
|
+
const names = [];
|
|
656
|
+
const parts = keyFunctions.split(",");
|
|
657
|
+
for (const p of parts) {
|
|
658
|
+
const trimmed = p.trim();
|
|
659
|
+
if (trimmed.length > 0) names.push(trimmed);
|
|
660
|
+
}
|
|
661
|
+
const details = map(names, (fn) => {
|
|
662
|
+
const code = search(fn);
|
|
663
|
+
return { name: fn, analysis: LLM("Explain what " + fn + " does in 2 sentences.", code) };
|
|
664
|
+
});
|
|
665
|
+
return details;
|
|
666
|
+
\`\`\`
|
|
667
|
+
|
|
668
|
+
**Pattern 7: Multi-topic analysis with chunking**
|
|
669
|
+
Search multiple topics, chunk each result, process in parallel:
|
|
670
|
+
\`\`\`
|
|
671
|
+
const topics = ["authentication", "authorization", "session"];
|
|
672
|
+
const allFindings = [];
|
|
673
|
+
for (const topic of topics) {
|
|
674
|
+
const results = search(topic);
|
|
675
|
+
const chunks = chunk(results);
|
|
676
|
+
const findings = map(chunks, (c) => LLM("Extract key patterns for " + topic + ". Be brief.", c));
|
|
677
|
+
for (const f of findings) { allFindings.push(String(f)); }
|
|
678
|
+
log("Processed " + topic + ": " + chunks.length + " chunks");
|
|
679
|
+
}
|
|
680
|
+
var combined = "";
|
|
681
|
+
for (const f of allFindings) { combined = combined + f + "\\n---\\n"; }
|
|
682
|
+
return LLM("Synthesize all findings into a security report.", combined);
|
|
683
|
+
\`\`\`
|
|
684
|
+
|
|
685
|
+
**Pattern 8: Batched file processing**
|
|
686
|
+
Process many files in parallel batches:
|
|
687
|
+
\`\`\`
|
|
688
|
+
const files = listFiles("*.js");
|
|
689
|
+
log("Found " + files.length + " files");
|
|
690
|
+
const batches = batch(files, 5);
|
|
691
|
+
const allResults = [];
|
|
692
|
+
for (const b of batches) {
|
|
693
|
+
const batchResults = map(b, (file) => {
|
|
694
|
+
const content = extract(file);
|
|
695
|
+
return LLM("Summarize this file in one sentence.", content);
|
|
696
|
+
});
|
|
697
|
+
for (const r of batchResults) { allResults.push(r); }
|
|
698
|
+
log("Processed batch, total: " + allResults.length);
|
|
699
|
+
}
|
|
700
|
+
return allResults;
|
|
701
|
+
\`\`\`
|
|
702
|
+
|
|
703
|
+
**Pattern 9: Data pipeline with session store**
|
|
704
|
+
Extract structured data, accumulate, compute statistics with pure JS, format as table:
|
|
705
|
+
\`\`\`
|
|
706
|
+
// Phase 1: Extract structured data from search results
|
|
707
|
+
const results = search("API endpoints");
|
|
708
|
+
const chunks = chunk(results);
|
|
709
|
+
const extracted = map(chunks, (c) => LLM(
|
|
710
|
+
"Extract API endpoints as JSON array: [{method, path, description}]. Return ONLY valid JSON.",
|
|
711
|
+
c
|
|
712
|
+
));
|
|
713
|
+
for (const e of extracted) {
|
|
714
|
+
const parsed = parseJSON(String(e));
|
|
715
|
+
if (parsed) {
|
|
716
|
+
for (const item of parsed) { storeAppend("endpoints", item); }
|
|
717
|
+
} else { log("Parse error, skipping chunk"); }
|
|
718
|
+
}
|
|
719
|
+
|
|
720
|
+
// Phase 2: Compute statistics with pure JS (no LLM needed)
|
|
721
|
+
const all = storeGet("endpoints");
|
|
722
|
+
log("Total endpoints: " + all.length);
|
|
723
|
+
const byMethod = groupBy(all, "method");
|
|
724
|
+
var table = "| Method | Count | % |\\n|--------|-------|---|\\n";
|
|
725
|
+
const methods = Object.keys(byMethod);
|
|
726
|
+
for (const m of methods) {
|
|
727
|
+
const count = byMethod[m].length;
|
|
728
|
+
const pct = Math.round(count / all.length * 100);
|
|
729
|
+
table = table + "| " + m + " | " + count + " | " + pct + "% |\\n";
|
|
730
|
+
}
|
|
731
|
+
|
|
732
|
+
// Phase 3: Small LLM summary of the statistics
|
|
733
|
+
const summary = LLM("Write a 2-sentence executive summary of this API surface analysis.", table);
|
|
734
|
+
return table + "\\n" + summary;
|
|
735
|
+
\`\`\`
|
|
736
|
+
|
|
737
|
+
**Pattern 10: Direct output for large structured data**
|
|
738
|
+
Use \`output()\` to deliver tables/JSON directly to the user without LLM rewriting. The \`return\` value is what you (the AI) see as the tool result:
|
|
739
|
+
\`\`\`
|
|
740
|
+
const files = listFiles("**/*.md");
|
|
741
|
+
const batches = batch(files, 5);
|
|
742
|
+
const results = [];
|
|
743
|
+
for (const b of batches) {
|
|
744
|
+
const batchResults = map(b, (f) => {
|
|
745
|
+
try {
|
|
746
|
+
const content = extract(f);
|
|
747
|
+
return LLM("Extract: name, category. Return JSON: {name, category}. ONLY JSON.", content);
|
|
748
|
+
} catch (e) { return null; }
|
|
749
|
+
});
|
|
750
|
+
for (const r of batchResults) {
|
|
751
|
+
try { if (r) results.push(parseJSON(r)); } catch (e) { /* skip */ }
|
|
752
|
+
}
|
|
753
|
+
}
|
|
754
|
+
var table = "| Name | Category |\\n|------|----------|\\n";
|
|
755
|
+
for (const r of results) {
|
|
756
|
+
table = table + "| " + (r.name || "?") + " | " + (r.category || "?") + " |\\n";
|
|
757
|
+
}
|
|
758
|
+
output(table);
|
|
759
|
+
return "Generated table with " + results.length + " items.";
|
|
760
|
+
\`\`\``;
|
|
761
|
+
}
|