@polka-codes/core 0.9.81 → 0.9.83
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +1539 -1016
- package/package.json +5 -5
- package/dist/_tsup-dts-rollup.d.ts +0 -1649
- package/dist/index.d.ts +0 -109
package/dist/index.js
CHANGED
@@ -139,6 +139,15 @@ var ruleSchema = z.union([
     branch: z.string().optional()
   }).strict()
 ]);
+var providerConfigSchema = z.object({
+  apiKey: z.string().optional(),
+  defaultModel: z.string().optional(),
+  defaultParameters: z.record(z.string(), z.any()).optional(),
+  location: z.string().optional(),
+  project: z.string().optional(),
+  keyFile: z.string().optional(),
+  baseUrl: z.string().optional()
+});
 var providerModelSchema = z.object({
   provider: z.string().optional(),
   model: z.string().optional(),
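
Note: `providerConfigSchema` lifts the per-provider shape that was previously inlined under `providers` (see the `@@ -161,17` hunk below) and adds an optional `baseUrl`. A minimal sketch of a `providers` value that should satisfy it — the provider names and every value here are invented for illustration, only the field names come from the schema:

    const providers = {
      openrouter: {
        apiKey: "sk-...",                        // hypothetical key
        defaultModel: "anthropic/claude-sonnet-4",
        defaultParameters: { temperature: 0 },
        baseUrl: "https://openrouter.ai/api/v1"  // new optional field in this release
      },
      vertex: {
        project: "my-gcp-project",               // project/location/keyFile suggest a GCP-style provider
        location: "us-central1",
        keyFile: "./service-account.json"
      }
    };
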
@@ -146,6 +155,53 @@ var providerModelSchema = z.object({
   budget: z.number().positive().optional(),
   rules: z.array(ruleSchema).optional().or(z.string()).optional()
 });
+var scriptSchema = z.union([
+  // Type 1: Simple shell command (backward compatible)
+  z.string(),
+  // Type 2: Object with command and description (backward compatible)
+  z.object({
+    command: z.string(),
+    description: z.string()
+  }).strict(),
+  // Type 3: Reference to dynamic workflow YAML
+  z.object({
+    workflow: z.string(),
+    // Path to .yml workflow file
+    description: z.string().optional(),
+    input: z.record(z.string(), z.any()).optional()
+    // Default workflow input
+  }).strict(),
+  // Type 4: TypeScript script file (NEW)
+  z.object({
+    script: z.string(),
+    // Path to .ts file
+    description: z.string().optional(),
+    permissions: z.object({
+      fs: z.enum(["read", "write", "none"]).optional(),
+      network: z.boolean().optional(),
+      subprocess: z.boolean().optional()
+    }).optional(),
+    timeout: z.number().int().positive().max(36e5).optional(),
+    // Max 1 hour in milliseconds
+    memory: z.number().int().positive().min(64).max(8192).optional()
+    // 64MB-8GB in MB
+  }).strict()
+]);
+var mcpServerConfigSchema = z.object({
+  command: z.string(),
+  args: z.array(z.string()).optional(),
+  env: z.record(z.string(), z.string()).optional(),
+  tools: z.record(
+    z.string(),
+    z.boolean().or(
+      z.object({
+        provider: z.string().optional(),
+        model: z.string().optional(),
+        parameters: z.record(z.string(), z.unknown()).optional()
+      }).strict()
+    )
+  ).optional()
+}).strict();
 var configSchema = z.object({
   prices: z.record(
     z.string(),
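
Note: `scriptSchema` now accepts four shapes, and `mcpServerConfigSchema` introduces MCP server entries. A sketch of a `scripts` map exercising all four forms plus one MCP server — every name, path, and value is illustrative; only the field names mirror the schemas:

    const scripts = {
      test: "bun test",                                              // Type 1: plain shell command
      lint: { command: "eslint .", description: "Lint the repo" },   // Type 2: command + description
      release: {                                                     // Type 3: dynamic workflow YAML
        workflow: "./workflows/release.yml",
        input: { channel: "beta" }
      },
      migrate: {                                                     // Type 4: TypeScript script (new)
        script: "./scripts/migrate.ts",
        permissions: { fs: "write", network: false, subprocess: false },
        timeout: 60000,   // ms; schema caps at 3,600,000 (1 hour)
        memory: 512       // MB; schema bounds to 64-8192
      }
    };
    const mcpServers = {
      everything: {
        command: "npx",
        args: ["-y", "@modelcontextprotocol/server-everything"],  // hypothetical server
        tools: { echo: true }
      }
    };
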
@@ -161,17 +217,7 @@ var configSchema = z.object({
       })
     )
   ).optional(),
-  providers: z.record(
-    z.string(),
-    z.object({
-      apiKey: z.string().optional(),
-      defaultModel: z.string().optional(),
-      defaultParameters: z.record(z.string(), z.any()).optional(),
-      location: z.string().optional(),
-      project: z.string().optional(),
-      keyFile: z.string().optional()
-    })
-  ).optional(),
+  providers: z.record(z.string(), providerConfigSchema).optional(),
   defaultProvider: z.string().optional(),
   defaultModel: z.string().optional(),
   defaultParameters: z.record(z.string(), z.any()).optional(),
@@ -180,34 +226,690 @@ var configSchema = z.object({
   retryCount: z.number().int().min(0).optional(),
   requestTimeoutSeconds: z.number().int().positive().optional(),
   summaryThreshold: z.number().int().positive().optional(),
-  scripts: z.record(
-    z.string(),
-    z.string().or(
-      z.object({
-        command: z.string(),
-        description: z.string()
-      })
-    )
-  ).optional(),
+  scripts: z.record(z.string(), scriptSchema).optional(),
   commands: z.record(z.string(), providerModelSchema).optional(),
   tools: z.object({
     search: providerModelSchema.or(z.boolean()).optional()
   }).optional(),
+  mcpServers: z.record(z.string(), mcpServerConfigSchema).optional(),
   rules: z.array(ruleSchema).optional().or(z.string()).optional(),
   excludeFiles: z.array(z.string()).optional()
 }).strict().nullish();
 
-// src/
+// src/fs/node-provider.ts
+import { existsSync } from "fs";
+import { readdir, readFile, stat } from "fs/promises";
+import { join, normalize } from "path";
+var NodeFileSystemProvider = class {
+  constructor(_options = {}) {
+    this._options = _options;
+  }
+  exists(path) {
+    return existsSync(path);
+  }
+  async readdir(path) {
+    const entries = await readdir(path, { withFileTypes: true });
+    return entries.map((entry) => ({
+      name: entry.name,
+      isDirectory: entry.isDirectory(),
+      isFile: entry.isFile()
+    }));
+  }
+  async readFile(path) {
+    return readFile(path, "utf-8");
+  }
+  async readFileAsBuffer(path) {
+    const buffer = await readFile(path);
+    return new Uint8Array(buffer);
+  }
+  async stat(path) {
+    const stats = await stat(path);
+    return {
+      size: stats.size,
+      isDirectory: stats.isDirectory(),
+      isFile: stats.isFile()
+    };
+  }
+  join(...paths) {
+    return join(...paths);
+  }
+  normalize(path) {
+    return normalize(path);
+  }
+};
+
+// src/skills/constants.ts
+var SKILL_LIMITS = {
+  MAX_FILE_SIZE: 1024 * 1024,
+  // 1MB per file
+  MAX_SKILL_SIZE: 10 * 1024 * 1024,
+  // 10MB total
+  MAX_DEPTH: 10,
+  // Maximum directory recursion depth
+  MAX_FILES: 500,
+  // Maximum files to load per skill
+  MIN_DESCRIPTION_LENGTH: 20,
+  // Minimum description length
+  MAX_DESCRIPTION_LENGTH: 1024,
+  // Maximum description length
+  MAX_NAME_LENGTH: 64
+  // Maximum skill name length
+};
+var IGNORED_DIRECTORIES = [
+  ".git",
+  "node_modules",
+  ".next",
+  ".turbo",
+  "dist",
+  "build",
+  "coverage",
+  ".cache",
+  ".vscode",
+  ".idea",
+  "tmp",
+  "temp",
+  ".DS_Store"
+];
+var SUSPICIOUS_PATTERNS = [
+  /<script[^>]*>[\s\S]*?<\/script>/i,
+  // Script tags (with dotAll for multiline)
+  /javascript:/i,
+  // JavaScript URLs
+  /on\w+\s*=/i
+  // Event handlers (onclick, onload, etc.)
+];
+var SKILL_ERROR_MESSAGES = {
+  MISSING_FRONTMATTER: "SKILL.md must begin with YAML frontmatter enclosed in ---",
+  FRONTMATTER_INVALID: "Invalid frontmatter: {message}",
+  SKILL_NOT_FOUND: "Skill not found",
+  CONTEXT_NOT_INITIALIZED: "Skill context not initialized"
+};
+var SOURCE_ICONS = {
+  project: "\u{1F4C1}",
+  personal: "\u{1F3E0}",
+  plugin: "\u{1F50C}"
+};
+
+// src/skills/discovery.ts
+import { homedir } from "os";
+import { parse } from "yaml";
+import { ZodError } from "zod";
+
+// src/skills/types.ts
 import { z as z2 } from "zod";
-var
-
-
+var skillMetadataSchema = z2.object({
+  name: z2.string().regex(/^[a-z0-9-]+$/, "Skill name must be lowercase letters, numbers, and hyphens").max(64, "Skill name must be at most 64 characters"),
+  description: z2.string().max(1024, "Description must be at most 1024 characters"),
+  allowedTools: z2.array(z2.string()).optional()
+});
+var SkillDiscoveryError = class extends Error {
+  constructor(message, path) {
+    super(message);
+    this.path = path;
+    this.name = "SkillDiscoveryError";
+  }
+};
+var SkillValidationError = class extends Error {
+  constructor(message, path) {
+    super(message);
+    this.path = path;
+    this.name = "SkillValidationError";
+  }
+};
+
+// src/skills/discovery.ts
+var BINARY_EXTENSIONS = [
+  ".png",
+  ".jpg",
+  ".jpeg",
+  ".gif",
+  ".bmp",
+  ".ico",
+  ".webp",
+  ".pdf",
+  ".zip",
+  ".tar",
+  ".gz",
+  ".exe",
+  ".dll",
+  ".so",
+  ".dylib",
+  ".bin",
+  ".dat",
+  ".db",
+  ".sqlite",
+  ".woff",
+  ".woff2",
+  ".ttf",
+  ".eot"
+];
+function isBinaryFile(filename) {
+  const ext = filename.toLowerCase().slice(filename.lastIndexOf("."));
+  return BINARY_EXTENSIONS.includes(ext);
+}
+function isBinaryContent(buffer) {
+  const checkLength = Math.min(buffer.length, 8192);
+  for (let i = 0; i < checkLength; i++) {
+    if (buffer[i] === 0) {
+      return true;
+    }
+  }
+  return false;
+}
+async function tryReadTextFile(filePath, fs) {
+  try {
+    const buffer = await fs.readFileAsBuffer(filePath);
+    if (isBinaryContent(buffer)) {
+      return null;
+    }
+    try {
+      return new TextDecoder("utf-8", { fatal: true }).decode(buffer);
+    } catch (_decodeError) {
+      return null;
+    }
+  } catch (error) {
+    if (error && typeof error === "object" && "code" in error && (error.code === "EINVAL" || error.code === "EISDIR")) {
+      return null;
+    }
+    throw error;
+  }
+}
+var SkillDiscoveryService = class {
+  fs;
+  personalSkillsDir;
+  projectSkillsDir;
+  pluginSkillsDirs;
+  constructor(options) {
+    this.fs = options.fs ?? new NodeFileSystemProvider();
+    this.personalSkillsDir = options.personalSkillsDir ?? this.fs.join(homedir(), ".claude", "skills");
+    this.projectSkillsDir = this.fs.join(options.cwd, ".claude", "skills");
+    this.pluginSkillsDirs = options.pluginSkillsDirs ?? [];
+  }
+  /**
+   * Discover all available skills from all sources
+   * Removes duplicates (project skills take priority over personal/plugin)
+   */
+  async discoverAll() {
+    const skills = [];
+    const results = await Promise.allSettled([
+      this.discoverInDirectory(this.projectSkillsDir, "project"),
+      this.discoverInDirectory(this.personalSkillsDir, "personal"),
+      this.discoverPlugins()
+    ]);
+    const projectSkills = results[0].status === "fulfilled" ? results[0].value : [];
+    const personalSkills = results[1].status === "fulfilled" ? results[1].value : [];
+    const pluginSkills = results[2].status === "fulfilled" ? results[2].value : [];
+    if (results[0].status === "rejected") {
+      console.warn(`Failed to load project skills: ${results[0].reason}`);
+    }
+    if (results[1].status === "rejected") {
+      console.warn(`Failed to load personal skills: ${results[1].reason}`);
+    }
+    if (results[2].status === "rejected") {
+      console.warn(`Failed to load plugin skills: ${results[2].reason}`);
+    }
+    const seenNames = /* @__PURE__ */ new Set();
+    const allSkills = [...projectSkills, ...personalSkills, ...pluginSkills];
+    for (const skill of allSkills) {
+      if (!seenNames.has(skill.metadata.name)) {
+        seenNames.add(skill.metadata.name);
+        skills.push(skill);
+      }
+    }
+    return skills;
+  }
+  /**
+   * Discover skills in a specific directory
+   */
+  async discoverInDirectory(dir, source) {
+    const exists = typeof this.fs.exists === "boolean" ? this.fs.exists(dir) : await this.fs.exists(dir);
+    if (!exists) {
+      return [];
+    }
+    const skills = [];
+    const entries = await this.fs.readdir(dir);
+    for (const entry of entries) {
+      if (!entry.isDirectory) {
+        continue;
+      }
+      const skillPath = this.fs.join(dir, entry.name);
+      const skillMdPath = this.fs.join(skillPath, "SKILL.md");
+      const skillMdExists = typeof this.fs.exists === "boolean" ? this.fs.exists(skillMdPath) : await this.fs.exists(skillMdPath);
+      if (!skillMdExists) {
+        continue;
+      }
+      try {
+        const content = await this.fs.readFile(skillMdPath);
+        const { metadata } = this.parseSkillMd(content);
+        skills.push({
+          metadata,
+          path: skillPath,
+          source
+        });
+      } catch (error) {
+        let message = "Unknown error";
+        let path = skillPath;
+        if (error instanceof SkillDiscoveryError) {
+          message = error.message;
+          path = error.path;
+        } else if (error instanceof ZodError) {
+          message = error.issues[0]?.message ?? "Invalid skill metadata";
+        } else if (error instanceof Error) {
+          message = error.message;
+        }
+        console.warn(`Warning: Failed to load skill at ${path}: ${message}`);
+      }
+    }
+    return skills;
+  }
+  /**
+   * Discover skills from plugin directories (node_modules)
+   */
+  async discoverPlugins() {
+    const skills = [];
+    for (const pluginDir of this.pluginSkillsDirs) {
+      const exists = typeof this.fs.exists === "boolean" ? this.fs.exists(pluginDir) : await this.fs.exists(pluginDir);
+      if (!exists) {
+        continue;
+      }
+      const pluginSkills = await this.discoverInDirectory(pluginDir, "plugin");
+      skills.push(...pluginSkills);
+    }
+    return skills;
+  }
+  /**
+   * Load a single skill from its directory
+   */
+  async loadSkill(skillPath, source) {
+    const skillMdPath = this.fs.join(skillPath, "SKILL.md");
+    const skillMdExists = typeof this.fs.exists === "boolean" ? this.fs.exists(skillMdPath) : await this.fs.exists(skillMdPath);
+    if (!skillMdExists) {
+      throw new SkillDiscoveryError("SKILL.md not found", skillPath);
+    }
+    const content = await this.fs.readFile(skillMdPath);
+    const { metadata, content: instructions } = this.parseSkillMd(content);
+    const files = /* @__PURE__ */ new Map();
+    let totalSize = 0;
+    const entries = await this.fs.readdir(skillPath);
+    for (const entry of entries) {
+      if (entry.name === "SKILL.md") {
+        continue;
+      }
+      const filePath = this.fs.join(skillPath, entry.name);
+      if (entry.isFile) {
+        if (isBinaryFile(entry.name)) {
+          continue;
+        }
+        const fileStats = await this.fs.stat(filePath);
+        const fileSize = fileStats.size;
+        if (fileSize > SKILL_LIMITS.MAX_FILE_SIZE) {
+          throw new SkillDiscoveryError(`File size exceeds limit (${fileSize} > ${SKILL_LIMITS.MAX_FILE_SIZE}): ${entry.name}`, skillPath);
+        }
+        if (totalSize + fileSize > SKILL_LIMITS.MAX_SKILL_SIZE) {
+          throw new SkillDiscoveryError(
+            `Total skill size exceeds limit (${totalSize + fileSize} > ${SKILL_LIMITS.MAX_SKILL_SIZE}): ${skillPath}`,
+            skillPath
+          );
+        }
+        const fileContent = await tryReadTextFile(filePath, this.fs);
+        if (fileContent === null) {
+          continue;
+        }
+        totalSize += fileSize;
+        files.set(entry.name, fileContent);
+      } else if (entry.isDirectory) {
+        totalSize = await this.loadDirectoryFiles(filePath, entry.name, files, 0, totalSize);
+      }
+    }
+    return {
+      metadata,
+      content: instructions,
+      files,
+      path: skillPath,
+      source
+    };
+  }
+  /**
+   * Parse SKILL.md content and extract frontmatter
+   */
+  parseSkillMd(content) {
+    const frontmatterRegex = /^---\r?\n([\s\S]+?)\r?\n---\r?\n([\s\S]*)$/;
+    const match = content.match(frontmatterRegex);
+    if (!match || match.length < 3) {
+      throw new SkillDiscoveryError(SKILL_ERROR_MESSAGES.MISSING_FRONTMATTER, "");
+    }
+    const frontmatter = match[1] ?? "";
+    const instructions = match[2] ?? "";
+    const metadata = this.parseMetadata(frontmatter);
+    return { metadata, content: instructions };
+  }
+  /**
+   * Parse and validate YAML frontmatter
+   */
+  parseMetadata(frontmatter) {
+    const parsed = parse(frontmatter);
+    return skillMetadataSchema.parse(parsed);
+  }
+  /**
+   * Recursively load files from a directory into the files map
+   * @returns The total size of all files loaded (in bytes)
+   */
+  async loadDirectoryFiles(dirPath, prefix, files, depth = 0, currentTotal = 0) {
+    const { MAX_DEPTH, MAX_FILES } = SKILL_LIMITS;
+    let totalSize = currentTotal;
+    if (depth > MAX_DEPTH) {
+      return totalSize;
+    }
+    if (files.size >= MAX_FILES) {
+      return totalSize;
+    }
+    const currentDirName = prefix.split("/").pop() ?? prefix;
+    if (IGNORED_DIRECTORIES.includes(currentDirName)) {
+      return totalSize;
+    }
+    const entries = await this.fs.readdir(dirPath);
+    for (const entry of entries) {
+      if (files.size >= MAX_FILES) {
+        break;
+      }
+      const filePath = this.fs.join(dirPath, entry.name);
+      const key = `${prefix}/${entry.name}`.replace(/\/+/g, "/");
+      if (entry.isFile) {
+        if (isBinaryFile(entry.name)) {
+          continue;
+        }
+        const fileStats = await this.fs.stat(filePath);
+        const fileSize = fileStats.size;
+        if (fileSize > SKILL_LIMITS.MAX_FILE_SIZE) {
+          throw new SkillDiscoveryError(`File size exceeds limit (${fileSize} > ${SKILL_LIMITS.MAX_FILE_SIZE}): ${key}`, dirPath);
+        }
+        if (totalSize + fileSize > SKILL_LIMITS.MAX_SKILL_SIZE) {
+          throw new SkillDiscoveryError(
+            `Total skill size exceeds limit (${totalSize + fileSize} > ${SKILL_LIMITS.MAX_SKILL_SIZE}): ${dirPath}`,
+            dirPath
+          );
+        }
+        const content = await tryReadTextFile(filePath, this.fs);
+        if (content === null) {
+          continue;
+        }
+        totalSize += fileSize;
+        files.set(key, content);
+      } else if (entry.isDirectory) {
+        if (IGNORED_DIRECTORIES.includes(entry.name)) {
+          continue;
+        }
+        totalSize = await this.loadDirectoryFiles(filePath, key, files, depth + 1, totalSize);
+      }
+    }
+    return totalSize;
+  }
+  /**
+   * Create an initial skill context object
+   */
+  async createContext() {
+    const availableSkills = await this.discoverAll();
+    return {
+      activeSkill: null,
+      availableSkills,
+      skillLoadingHistory: [],
+      loadSkill: async (name) => {
+        const ref = availableSkills.find((s) => s.metadata.name === name);
+        if (!ref) return null;
+        return this.loadSkill(ref.path, ref.source);
+      }
+    };
+  }
+};
+
+// src/skills/validation.ts
+import { join as join2, normalize as normalize2 } from "path";
+function validateSkillSecurity(skill) {
+  const { MAX_FILE_SIZE, MAX_SKILL_SIZE } = SKILL_LIMITS;
+  let totalSize = 0;
+  const contentSize = Buffer.byteLength(skill.content, "utf8");
+  if (contentSize > MAX_FILE_SIZE) {
+    throw new SkillValidationError(`SKILL.md content exceeds size limit (${contentSize} > ${MAX_FILE_SIZE})`, join2(skill.path, "SKILL.md"));
+  }
+  totalSize += contentSize;
+  for (const [filename, content] of skill.files) {
+    const fileSize = Buffer.byteLength(content, "utf8");
+    if (fileSize > MAX_FILE_SIZE) {
+      throw new SkillValidationError(`File ${filename} exceeds size limit (${fileSize} > ${MAX_FILE_SIZE})`, join2(skill.path, filename));
+    }
+    totalSize += fileSize;
+  }
+  if (totalSize > MAX_SKILL_SIZE) {
+    throw new SkillValidationError(`Skill total size exceeds limit (${totalSize} > ${MAX_SKILL_SIZE})`, skill.path);
+  }
+  validateContentSecurity(skill.content, skill.path);
+  for (const [filename, content] of skill.files) {
+    validateContentSecurity(content, join2(skill.path, filename));
+  }
+}
+function validateContentSecurity(content, path) {
+  for (const pattern of SUSPICIOUS_PATTERNS) {
+    if (pattern.test(content)) {
+      throw new SkillValidationError("Suspicious content detected", path);
+    }
+  }
+}
+function validateSkillReferences(skill) {
+  const warnings = [];
+  const externalRefs = skill.content.match(/https?:\/\/[^\s\])]+/g) || [];
+  if (externalRefs.length > 0 && !skill.metadata.description.toLowerCase().includes("external")) {
+    warnings.push(
+      `Skill '${skill.metadata.name}' contains external references. Consider adding 'external' to description for transparency.`
+    );
+  }
+  const codeBlocks = skill.content.match(/```[\s\S]*?```/g) || [];
+  for (const block of codeBlocks) {
+    const pathsInCode = block.match(/\/[a-zA-Z][\w./-]*/g) || [];
+    for (const path of pathsInCode) {
+      if (!path.startsWith("/dev") && !path.startsWith("/proc") && !path.startsWith("/sys") && !path.startsWith("//")) {
+        warnings.push(`Skill '${skill.metadata.name}' contains possible absolute path '${path}'. Use relative paths instead.`);
+      }
+    }
+  }
+  const linkRegex = /\[[^\]]+\]\(([^)]+(?:\s+[^)]+)*)\)/g;
+  let match;
+  match = linkRegex.exec(skill.content);
+  while (match !== null) {
+    const filepath = match[1];
+    match = linkRegex.exec(skill.content);
+    if (filepath.startsWith("http://") || filepath.startsWith("https://") || filepath.startsWith("#")) {
+      continue;
+    }
+    const normalizedPath = normalize2(filepath).replace(/^\.?\//, "").replace(/\\/g, "/");
+    if (!skill.files.has(normalizedPath)) {
+      warnings.push(`Referenced file not found: ${filepath}`);
+    }
+  }
+  return warnings;
+}
+function validateSkillMetadata(skill) {
+  const errors = [];
+  if (skill.metadata.description.length < 20) {
+    errors.push(`Description too short: ${skill.metadata.description.length} < 20`);
+  }
+  return errors;
+}
+function getSkillStats(skill) {
+  let totalSize = Buffer.byteLength(skill.content, "utf8");
+  let largestFile = { name: "SKILL.md", size: totalSize };
+  let fileCount = 1;
+  for (const [name, content] of skill.files) {
+    const size = Buffer.byteLength(content, "utf8");
+    totalSize += size;
+    fileCount++;
+    if (size > largestFile.size) {
+      largestFile = { name, size };
+    }
+  }
+  return {
+    totalSize,
+    fileCount,
+    largestFile
+  };
+}
+
+// src/skills/tools/listSkills.ts
+import { z as z3 } from "zod";
+var ListSkillsInputSchema = z3.object({
+  filter: z3.string().optional().describe("Optional filter string to match against skill names and descriptions")
+});
+var ListSkillsOutputSchema = z3.object({
+  skills: z3.array(
+    z3.object({
+      name: z3.string(),
+      description: z3.string(),
+      source: z3.enum(["personal", "project", "plugin"])
+    })
+  ),
+  total: z3.number()
+});
+async function listSkills(input, context) {
+  const { filter } = input;
+  let skills = context.availableSkills;
+  if (filter) {
+    const filterLower = filter.toLowerCase();
+    skills = skills.filter((s) => s.metadata.name.includes(filterLower) || s.metadata.description.toLowerCase().includes(filterLower));
+  }
+  return {
+    skills: skills.map((s) => ({
+      name: s.metadata.name,
+      description: s.metadata.description,
+      source: s.source
+    })),
+    total: skills.length
+  };
+}
+var listSkillsToolInfo = {
+  name: "listSkills",
+  description: "List all available skills with their descriptions. Use this to discover what specialized capabilities are available.",
+  parameters: ListSkillsInputSchema,
+  returns: ListSkillsOutputSchema
+};
+
+// src/skills/tools/loadSkill.ts
+import { z as z4 } from "zod";
+var LoadSkillInputSchema = z4.object({
+  skillName: z4.string().describe("The name of the skill to load")
+});
+var LoadSkillOutputSchema = z4.object({
+  success: z4.boolean(),
+  skill: z4.object({
+    name: z4.string(),
+    description: z4.string(),
+    content: z4.string(),
+    availableFiles: z4.array(z4.string())
+  }).optional(),
+  error: z4.string().optional(),
+  warnings: z4.array(z4.string()).optional()
+});
+async function loadSkill(input, context) {
+  const { skillName } = input;
+  const skillRef = context.availableSkills.find((s) => s.metadata.name === skillName);
+  if (!skillRef) {
+    return {
+      success: false,
+      error: `Skill '${skillName}' not found`
+    };
+  }
+  try {
+    const skill = await context.loadSkill(skillName);
+    if (!skill) {
+      return {
+        success: false,
+        error: `Failed to load skill '${skillName}'`
+      };
+    }
+    validateSkillSecurity(skill);
+    const warnings = validateSkillReferences(skill);
+    context.activeSkill = skill;
+    context.skillLoadingHistory.push(skillName);
+    return {
+      success: true,
+      skill: {
+        name: skill.metadata.name,
+        description: skill.metadata.description,
+        content: skill.content,
+        availableFiles: Array.from(skill.files.keys())
+      },
+      warnings: warnings.length > 0 ? warnings : void 0
+    };
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    return {
+      success: false,
+      error: `Failed to load skill '${skillName}': ${message}`
+    };
+  }
+}
+var loadSkillToolInfo = {
+  name: "loadSkill",
+  description: "Load a skill by name to access its instructions and resources. Use this when you need specialized knowledge or capabilities for a specific task.",
+  parameters: LoadSkillInputSchema,
+  returns: LoadSkillOutputSchema
+};
+
+// src/skills/tools/readSkillFile.ts
+import { z as z5 } from "zod";
+var ReadSkillFileInputSchema = z5.object({
+  skillName: z5.string().describe("The name of the skill"),
+  filename: z5.string().describe('The name of the file to read (e.g., "reference.md", "scripts/helper.py")')
+});
+var ReadSkillFileOutputSchema = z5.object({
+  success: z5.boolean(),
+  content: z5.string().optional(),
+  error: z5.string().optional()
+});
+async function readSkillFile(input, context) {
+  const { skillName, filename } = input;
+  let skill = context.activeSkill && context.activeSkill.metadata.name === skillName ? context.activeSkill : null;
+  if (!skill) {
+    try {
+      skill = await context.loadSkill(skillName);
+    } catch (_error) {
+    }
+  }
+  if (!skill) {
+    return {
+      success: false,
+      error: `Skill '${skillName}' not found or could not be loaded. Use loadSkill first.`
+    };
+  }
+  if (!skill.files.has(filename)) {
+    const availableFiles = Array.from(skill.files.keys()).sort();
+    return {
+      success: false,
+      error: `File '${filename}' not found in skill '${skillName}'. Available files: ${availableFiles.join(", ") || "none"}`
+    };
+  }
+  const content = skill.files.get(filename);
+  return {
+    success: true,
+    content
+  };
+}
+var readSkillFileToolInfo = {
+  name: "readSkillFile",
+  description: "Read a supporting file from a skill. Use this to access reference documentation, examples, scripts, or templates bundled with a skill. First use loadSkill to see available files, then use this tool to read specific files.",
+  parameters: ReadSkillFileInputSchema,
+  returns: ReadSkillFileOutputSchema
+};
+
+// src/tools/askFollowupQuestion.ts
+import { z as z6 } from "zod";
+var questionObject = z6.object({
+  prompt: z6.string().describe("The text of the question.").meta({ usageValue: "question text here" }),
+  options: z6.array(z6.string()).default([]).describe("Ordered list of suggested answers (omit if none).").meta({ usageValue: "suggested answer here" })
 });
 var toolInfo = {
   name: "askFollowupQuestion",
   description: "Call this when vital details are missing. Pose each follow-up as one direct, unambiguous question. If it speeds the reply, add up to five short, mutually-exclusive answer options. Group any related questions in the same call to avoid a back-and-forth chain.",
-  parameters:
-  questions:
+  parameters: z6.object({
+    questions: z6.array(questionObject).describe("One or more follow-up questions you need answered before you can continue.").meta({ usageValue: "questions here" })
 }).meta({
   examples: [
     {
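
Note: a skill is a directory whose SKILL.md frontmatter must satisfy `skillMetadataSchema` (kebab-case `name` up to 64 chars, `description` up to 1024 chars, optional `allowedTools`). Because `discoverAll` spreads project skills first and keeps the first name seen, project skills shadow personal ones, which shadow plugin ones. A hypothetical minimal SKILL.md, shown as the string `parseSkillMd` would receive (all names and text invented):

    // Hypothetical skill; only the frontmatter keys come from skillMetadataSchema.
    const skillMd = `---
    name: release-notes
    description: Drafts release notes from merged changes and this repo's changelog conventions.
    allowedTools:
      - readFile
      - searchFiles
    ---
    Read CHANGELOG.md first, then summarize user-facing changes.
    `;
    // parseSkillMd splits on the --- fences into { metadata, content };
    // parseMetadata then runs skillMetadataSchema.parse on the YAML block.
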
@@ -295,20 +997,41 @@ var askFollowupQuestion_default = {
 };
 
 // src/tools/executeCommand.ts
-import { z as
+import { z as z7 } from "zod";
+
+// src/tools/utils.ts
+function createProviderError(action) {
+  return {
+    success: false,
+    message: {
+      type: "error-text",
+      value: `Not possible to ${action}.`
+    }
+  };
+}
+function preprocessBoolean(val) {
+  return typeof val === "string" ? val.toLowerCase() === "true" : val;
+}
+function createFileElement(tagName, path, content, attrs) {
+  const allAttrs = { path, ...attrs };
+  const attrStr = Object.entries(allAttrs).map(([k, v]) => ` ${k}="${v}"`).join("");
+  if (content === void 0) {
+    return `<${tagName}${attrStr} />`;
+  }
+  const isEmpty = content.trim().length === 0;
+  if (isEmpty) {
+    return `<${tagName}${attrStr} is_empty="true" />`;
+  }
+  return `<${tagName}${attrStr}>${content}</${tagName}>`;
+}
+
+// src/tools/executeCommand.ts
 var toolInfo2 = {
   name: "executeCommand",
   description: "Run a single CLI command. The command is always executed in the project-root working directory (regardless of earlier commands). Prefer one-off shell commands over wrapper scripts for flexibility. **IMPORTANT**: After an `execute_command` call, you MUST stop and NOT allowed to make further tool calls in the same message.",
-  parameters:
-  command:
-  requiresApproval:
-    if (typeof val === "string") {
-      const lower = val.toLowerCase();
-      if (lower === "false") return false;
-      if (lower === "true") return true;
-    }
-    return val;
-  }, z3.boolean().optional().default(false)).describe(
+  parameters: z7.object({
+    command: z7.string().describe("The exact command to run (valid for the current OS). It must be correctly formatted and free of harmful instructions.").meta({ usageValue: "your-command-here" }),
+    requiresApproval: z7.preprocess(preprocessBoolean, z7.boolean().optional().default(false)).describe(
   "Set to `true` for commands that install/uninstall software, modify or delete files, change system settings, perform network operations, or have other side effects. Use `false` for safe, read-only, or purely local development actions (e.g., listing files, make a build, running tests)."
 ).meta({ usageValue: "true | false" })
 }).meta({
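
Note: the new `src/tools/utils.ts` helpers deduplicate the error objects, string-to-boolean coercion, and XML-ish file wrappers repeated across the tool handlers below. Traced from the definitions above:

    createProviderError("read file");
    // => { success: false, message: { type: "error-text", value: "Not possible to read file." } }

    preprocessBoolean("TRUE");  // => true  (case-insensitive string match)
    preprocessBoolean(false);   // => false (non-strings pass through unchanged)

    createFileElement("read_file_file_content", "a.ts", "let x = 1");
    // => '<read_file_file_content path="a.ts">let x = 1</read_file_file_content>'
    createFileElement("read_file_file_content", "b.ts", "   ");
    // => '<read_file_file_content path="b.ts" is_empty="true" />'
    createFileElement("read_file_file_content", "c.ts", undefined, { file_not_found: "true" });
    // => '<read_file_file_content path="c.ts" file_not_found="true" />'

One subtle behavior change: the old inline preprocessors returned unrecognized strings unchanged, so something like "yes" fell through and failed the z.boolean() check; preprocessBoolean now coerces any string other than "true" (any case) to false.
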
@@ -325,13 +1048,7 @@ var toolInfo2 = {
 };
 var handler2 = async (provider, args) => {
   if (!provider.executeCommand) {
-    return
-      success: false,
-      message: {
-        type: "error-text",
-        value: "Not possible to execute command. Abort."
-      }
-    };
+    return createProviderError("execute command. Abort");
   }
   const { command, requiresApproval } = toolInfo2.parameters.parse(args);
   try {
@@ -385,16 +1102,16 @@ var executeCommand_default = {
 };
 
 // src/tools/fetchUrl.ts
-import { z as
+import { z as z8 } from "zod";
 var toolInfo3 = {
   name: "fetchUrl",
   description: "Fetch the content located at one or more HTTP(S) URLs and return it in Markdown format. This works for standard web pages as well as raw files (e.g. README.md, source code) hosted on platforms like GitHub.",
-  parameters:
-  url:
+  parameters: z8.object({
+    url: z8.preprocess((val) => {
       if (!val) return [];
       const values = Array.isArray(val) ? val : [val];
       return values.flatMap((i) => typeof i === "string" ? i.split(",") : []).filter((s) => s.length > 0);
-},
+    }, z8.array(z8.string())).describe("One or more URLs to fetch, separated by commas if multiple.").meta({ usageValue: "url" })
 }).meta({
   examples: [
     {
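
Note: the `url` preprocessor (behavior unchanged, now typed through `z8.array(z8.string())`) normalizes whatever the model sends into a string array. Calling the preprocess function `normalizeUrls` here purely for illustration:

    normalizeUrls(undefined);             // => []  (falsy input)
    normalizeUrls("https://a.example");   // => ["https://a.example"]
    normalizeUrls("https://a.example,https://b.example");
    // => ["https://a.example", "https://b.example"]  (comma-split)
    normalizeUrls(["https://a.example", 42]);
    // => ["https://a.example"]  (non-strings dropped by flatMap)
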
@@ -463,29 +1180,15 @@ var fetchUrl_default = {
 };
 
 // src/tools/listFiles.ts
-import { z as
+import { z as z9 } from "zod";
 var toolInfo4 = {
   name: "listFiles",
   description: "Request to list files and directories within the specified directory. If recursive is true, it will list all files and directories recursively. If recursive is false or not provided, it will only list the top-level contents. Do not use this tool to confirm the existence of files you may have created, as the user will let you know if the files were created successfully or not.",
-  parameters:
-  path:
-  maxCount:
-  recursive:
-
-      const lower = val.toLowerCase();
-      if (lower === "false") return false;
-      if (lower === "true") return true;
-    }
-    return val;
-  }, z5.boolean().optional().default(true)).describe("Whether to list files recursively. Use true for recursive listing, false or omit for top-level only.").meta({ usageValue: "true or false (optional)" }),
-  includeIgnored: z5.preprocess((val) => {
-    if (typeof val === "string") {
-      const lower = val.toLowerCase();
-      if (lower === "false") return false;
-      if (lower === "true") return true;
-    }
-    return val;
-  }, z5.boolean().optional().default(false)).describe("Whether to include ignored files. Use true to include files ignored by .gitignore.").meta({ usageValue: "true or false (optional)" })
+  parameters: z9.object({
+    path: z9.string().describe("The path of the directory to list contents for (relative to the current working directory)").meta({ usageValue: "Directory path here" }),
+    maxCount: z9.coerce.number().optional().default(2e3).describe("The maximum number of files to list. Default to 2000").meta({ usageValue: "Maximum number of files to list (optional)" }),
+    recursive: z9.preprocess(preprocessBoolean, z9.boolean().optional().default(true)).describe("Whether to list files recursively. Use true for recursive listing, false or omit for top-level only.").meta({ usageValue: "true or false (optional)" }),
+    includeIgnored: z9.preprocess(preprocessBoolean, z9.boolean().optional().default(false)).describe("Whether to include ignored files. Use true to include files ignored by .gitignore.").meta({ usageValue: "true or false (optional)" })
 }).meta({
   examples: [
     {
@@ -500,13 +1203,7 @@ var toolInfo4 = {
 };
 var handler4 = async (provider, args) => {
   if (!provider.listFiles) {
-    return
-      success: false,
-      message: {
-        type: "error-text",
-        value: "Not possible to list files."
-      }
-    };
+    return createProviderError("list files");
   }
   const { path, maxCount, recursive, includeIgnored } = toolInfo4.parameters.parse(args);
   const [files, limitReached] = await provider.listFiles(path, recursive, maxCount, includeIgnored);
@@ -589,12 +1286,12 @@ var MockProvider = class {
 };
 
 // src/tools/readBinaryFile.ts
-import { z as
+import { z as z10 } from "zod";
 var toolInfo5 = {
   name: "readBinaryFile",
   description: "Read a binary file from a URL or local path. Use file:// prefix to access local files. This can be used to access non-text files such as PDFs or images.",
-  parameters:
-  url:
+  parameters: z10.object({
+    url: z10.string().describe("The URL or local path of the file to read.")
 })
 };
 var handler5 = async (provider, args) => {
@@ -641,24 +1338,17 @@ var readBinaryFile_default = {
 };
 
 // src/tools/readFile.ts
-import { z as
+import { z as z11 } from "zod";
 var toolInfo6 = {
   name: "readFile",
   description: "Request to read the contents of one or multiple files at the specified paths. Use comma separated paths to read multiple files. Use this when you need to examine the contents of an existing file you do not know the contents of, for example to analyze code, review text files, or extract information from configuration files. May not be suitable for other types of binary files, as it returns the raw content as a string. Try to list all the potential files are relevent to the task, and then use this tool to read all the relevant files.",
-  parameters:
-  path:
+  parameters: z11.object({
+    path: z11.preprocess((val) => {
       if (!val) return [];
       const values = Array.isArray(val) ? val : [val];
       return values.flatMap((i) => typeof i === "string" ? i.split(",") : []).filter((s) => s.length > 0);
-},
-includeIgnored:
-    if (typeof val === "string") {
-      const lower = val.toLowerCase();
-      if (lower === "false") return false;
-      if (lower === "true") return true;
-    }
-    return val;
-  }, z7.boolean().nullish().default(false)).describe("Whether to include ignored files. Use true to include files ignored by .gitignore.").meta({ usageValue: "true or false (optional)" })
+    }, z11.array(z11.string())).describe("The path of the file to read").meta({ usageValue: "Comma separated paths here" }),
+    includeIgnored: z11.preprocess(preprocessBoolean, z11.boolean().nullish().default(false)).describe("Whether to include ignored files. Use true to include files ignored by .gitignore.").meta({ usageValue: "true or false (optional)" })
 }).meta({
   examples: [
     {
@@ -678,27 +1368,16 @@ var toolInfo6 = {
 };
 var handler6 = async (provider, args) => {
   if (!provider.readFile) {
-    return
-      success: false,
-      message: {
-        type: "error-text",
-        value: "Not possible to read file."
-      }
-    };
+    return createProviderError("read file");
   }
   const { path: paths, includeIgnored } = toolInfo6.parameters.parse(args);
   const resp = [];
   for (const path of paths) {
     const fileContent = await provider.readFile(path, includeIgnored ?? false);
     if (!fileContent) {
-      resp.push(
+      resp.push(createFileElement("read_file_file_content", path, void 0, { file_not_found: "true" }));
     } else {
-
-      if (isEmpty) {
-        resp.push(`<read_file_file_content path="${path}" is_empty="true" />`);
-      } else {
-        resp.push(`<read_file_file_content path="${path}">${fileContent}</read_file_file_content>`);
-      }
+      resp.push(createFileElement("read_file_file_content", path, fileContent));
     }
   }
   return {
@@ -715,12 +1394,12 @@ var readFile_default = {
 };
 
 // src/tools/removeFile.ts
-import { z as
+import { z as z12 } from "zod";
 var toolInfo7 = {
   name: "removeFile",
   description: "Request to remove a file at the specified path.",
-  parameters:
-  path:
+  parameters: z12.object({
+    path: z12.string().describe("The path of the file to remove").meta({ usageValue: "File path here" })
 }).meta({
   examples: [
     {
@@ -734,13 +1413,7 @@ var toolInfo7 = {
 };
 var handler7 = async (provider, args) => {
   if (!provider.removeFile) {
-    return
-      success: false,
-      message: {
-        type: "error-text",
-        value: "Not possible to remove file."
-      }
-    };
+    return createProviderError("remove file");
   }
   const parsed = toolInfo7.parameters.safeParse(args);
   if (!parsed.success) {
@@ -768,13 +1441,13 @@ var removeFile_default = {
 };
 
 // src/tools/renameFile.ts
-import { z as
+import { z as z13 } from "zod";
 var toolInfo8 = {
   name: "renameFile",
   description: "Request to rename a file from source path to target path.",
-  parameters:
-  source_path:
-  target_path:
+  parameters: z13.object({
+    source_path: z13.string().describe("The current path of the file").meta({ usageValue: "Source file path here" }),
+    target_path: z13.string().describe("The new path for the file").meta({ usageValue: "Target file path here" })
 }).meta({
   examples: [
     {
@@ -813,7 +1486,7 @@ var renameFile_default = {
 };
 
 // src/tools/replaceInFile.ts
-import { z as
+import { z as z14 } from "zod";
 
 // src/tools/utils/replaceInFile.ts
 var replaceInFile = (fileContent, diff) => {
@@ -856,10 +1529,27 @@ var replaceInFile = (fileContent, diff) => {
     }
     runningIndex++;
   }
-  const
-
-
-
+  const searchWords = trimmedSearch.replace(/\s+/g, " ").split(" ").filter((w) => w.length > 0);
+  let matchStartPos = -1;
+  let matchEndPos = -1;
+  if (searchWords.length > 0) {
+    const firstWordPos = content.indexOf(searchWords[0], offset);
+    if (firstWordPos !== -1) {
+      matchStartPos = firstWordPos;
+      matchEndPos = firstWordPos + searchWords[0].length;
+      for (let i = 1; i < searchWords.length; i++) {
+        const nextWordPos = content.indexOf(searchWords[i], matchEndPos);
+        if (nextWordPos === -1 || nextWordPos > matchEndPos + 100) {
+          matchStartPos = -1;
+          break;
+        }
+        matchEndPos = nextWordPos + searchWords[i].length;
+      }
+    }
+  }
+  if (matchStartPos !== -1 && matchEndPos !== -1) {
+    return content.slice(0, matchStartPos) + replace + content.slice(matchEndPos);
+  }
 }
 return null;
};
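
Note: this new fallback in `replaceInFile` relaxes exact SEARCH matching: the search block is collapsed to a word sequence, and the words must appear in `content` in order, each starting within 100 characters of the previous word's end; the REPLACE text is then spliced over the whole matched span. A hypothetical case that the exact matcher would miss but this fallback catches:

    const content = "let total =\n      a +\n      b; // end";
    const search  = "let total = a + b;";
    // words: ["let", "total", "=", "a", "+", "b;"] — each found in order via
    // indexOf, gaps well under 100 chars, so the span from "let" through "b;"
    // is replaced wholesale by the REPLACE text.

Because `indexOf` matches substrings rather than whole words, short tokens can land inside unrelated text; the 100-character gap cap is what bounds how far such a match can drift.
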
@@ -893,9 +1583,9 @@ var replaceInFile = (fileContent, diff) => {
 var toolInfo9 = {
   name: "replaceInFile",
   description: "Request to replace sections of content in an existing file using SEARCH/REPLACE blocks that define exact changes to specific parts of the file. This tool should be used when you need to make targeted changes to specific parts of a file.",
-  parameters:
-  path:
-  diff:
+  parameters: z14.object({
+    path: z14.string().describe("The path of the file to modify").meta({ usageValue: "File path here" }),
+    diff: z14.string().describe(
   `One or more SEARCH/REPLACE blocks following this exact format:
   \`\`\`
   <<<<<<< SEARCH
@@ -1081,12 +1771,12 @@ var replaceInFile_default = {
 };
 
 // src/tools/search.ts
-import { z as
+import { z as z15 } from "zod";
 var toolInfo10 = {
   name: "search",
   description: "Search the web for information using Google Search. Use this tool to find current information, facts, news, documentation, or research that is not available in your training data. Returns comprehensive search results with relevant content extracted from the web.",
-  parameters:
-  query:
+  parameters: z15.object({
+    query: z15.string().describe("The query to search for").meta({ usageValue: "Your search query here" })
 }).meta({
   examples: [
     {
@@ -1136,18 +1826,18 @@ var search_default = {
 };
 
 // src/tools/searchFiles.ts
-import { z as
+import { z as z16 } from "zod";
 var toolInfo11 = {
   name: "searchFiles",
   description: "Request to perform a regex search across files in a specified directory, outputting context-rich results that include surrounding lines. This tool searches for patterns or specific content across multiple files, displaying each match with encapsulating context.",
-  parameters:
-  path:
+  parameters: z16.object({
+    path: z16.string().describe(
   "The path of the directory to search in (relative to the current working directory). This directory will be recursively searched."
 ).meta({ usageValue: "Directory path here" }),
-  regex:
+    regex: z16.string().describe("The regular expression pattern to search for. Uses Rust regex syntax.").meta({
   usageValue: "Your regex pattern here"
 }),
-  filePattern:
+    filePattern: z16.string().optional().describe(
   'Comma-separated glob pattern to filter files (e.g., "*.ts" for TypeScript files or "*.ts,*.js" for both TypeScript and JavaScript files). If not provided, it will search all files (*).'
 ).meta({
   usageValue: "file pattern here (optional)"
@@ -1217,20 +1907,20 @@ var searchFiles_default = {
 };
 
 // src/tools/todo.ts
-import { z as
-var TodoStatus =
-var TodoItemSchema =
-  id:
-  title:
-  description:
+import { z as z17 } from "zod";
+var TodoStatus = z17.enum(["open", "completed", "closed"]);
+var TodoItemSchema = z17.object({
+  id: z17.string(),
+  title: z17.string(),
+  description: z17.string(),
   status: TodoStatus
 });
-var UpdateTodoItemInputSchema =
-  operation:
-  id:
-  parentId:
-  title:
-  description:
+var UpdateTodoItemInputSchema = z17.object({
+  operation: z17.enum(["add", "update"]),
+  id: z17.string().nullish(),
+  parentId: z17.string().nullish(),
+  title: z17.string().nullish(),
+  description: z17.string().nullish(),
   status: TodoStatus.nullish()
 }).superRefine((data, ctx) => {
   if (data.operation === "add") {
@@ -1251,18 +1941,18 @@ var UpdateTodoItemInputSchema = z13.object({
     }
   }
 });
-var UpdateTodoItemOutputSchema =
-  id:
+var UpdateTodoItemOutputSchema = z17.object({
+  id: z17.string()
 });
 
 // src/tools/writeToFile.ts
-import { z as
+import { z as z18 } from "zod";
 var toolInfo12 = {
   name: "writeToFile",
   description: "Request to write content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Ensure that the output content does not include incorrect escaped character patterns such as `<`, `>`, or `&`. Also ensure there is no unwanted CDATA tags in the content.",
-  parameters:
-  path:
-  content:
+  parameters: z18.object({
+    path: z18.string().describe("The path of the file to write to").meta({ usageValue: "File path here" }),
+    content: z18.string().describe(
   "The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified."
 ).meta({ usageValue: "Your file content here" })
 }).meta({
@@ -1290,13 +1980,7 @@ export default App;
 };
 var handler12 = async (provider, args) => {
   if (!provider.writeFile) {
-    return
-      success: false,
-      message: {
-        type: "error-text",
-        value: "Not possible to write file."
-      }
-    };
+    return createProviderError("write file");
   }
   const parsed = toolInfo12.parameters.safeParse(args);
   if (!parsed.success) {
@@ -1310,7 +1994,7 @@ var handler12 = async (provider, args) => {
   }
   let { path, content } = parsed.data;
   const trimmedContent = content.trim();
-  if (trimmedContent.startsWith("<![CDATA[") &&
+  if (trimmedContent.startsWith("<![CDATA[") && trimmedContent.endsWith("]]>")) content = trimmedContent.slice(9, -3);
   await provider.writeFile(path, content);
   return {
     success: true,
@@ -1347,7 +2031,7 @@ var UsageMeter = class {
     this.#maxMessages = opts.maxMessages ?? 1e3;
     this.#maxCost = opts.maxCost ?? 100;
   }
-  #
+  #calculateUsage(usage, providerMetadata, modelInfo) {
     const providerMetadataKey = Object.keys(providerMetadata ?? {})[0];
     const metadata = providerMetadata?.[providerMetadataKey] ?? {};
     switch (providerMetadataKey) {
@@ -1405,7 +2089,7 @@ var UsageMeter = class {
       cacheReadsPrice: 0
     };
     const usage = "totalUsage" in resp ? resp.totalUsage : resp.usage;
-    const result = this.#
+    const result = this.#calculateUsage(usage, resp.providerMetadata, modelInfo);
    this.#totals.input += result.input || 0;
     this.#totals.output += result.output || 0;
     this.#totals.cachedRead += result.cachedRead || 0;
@@ -1475,7 +2159,7 @@ var UsageMeter = class {
 
 // src/workflow/agent.workflow.ts
 import { jsonSchema } from "ai";
-import { toJSONSchema, z as
+import { toJSONSchema, z as z19 } from "zod";
 
 // src/workflow/types.ts
 var TaskEventKind = /* @__PURE__ */ ((TaskEventKind2) => {
@@ -1547,9 +2231,9 @@ var agentWorkflow = async (input, { step, tools, logger }) => {
   await event(`end-round-${i}`, { kind: "EndRequest" /* EndRequest */, message: textContent });
   if (toolCalls.length === 0) {
     if (!input.outputSchema) {
-      const
-      await event("end-task", { kind: "EndTask" /* EndTask */, exitReason:
-      return
+      const exitReason3 = { type: "Exit", message: textContent, messages };
+      await event("end-task", { kind: "EndTask" /* EndTask */, exitReason: exitReason3 });
+      return exitReason3;
     }
     const parsed = parseJsonFromMarkdown(textContent);
     if (!parsed.success) {
@@ -1559,13 +2243,13 @@ var agentWorkflow = async (input, { step, tools, logger }) => {
     }
     const validated = input.outputSchema.safeParse(parsed.data);
     if (!validated.success) {
-      const errorMessage = `Output validation failed. Error: ${
+      const errorMessage = `Output validation failed. Error: ${z19.prettifyError(validated.error)}. Please correct the output.`;
       nextMessage = [{ role: "user", content: errorMessage }];
       continue;
     }
-    const
-    await event("end-task", { kind: "EndTask" /* EndTask */, exitReason });
-    return
+    const exitReason2 = { type: "Exit", message: textContent, object: validated.data, messages };
+    await event("end-task", { kind: "EndTask" /* EndTask */, exitReason: exitReason2 });
+    return exitReason2;
   }
   const toolResults = [];
   for (const toolCall of toolCalls) {
@@ -1634,77 +2318,243 @@ var agentWorkflow = async (input, { step, tools, logger }) => {
     }
   ];
 }
-
-
+const exitReason = { type: "UsageExceeded", messages };
+await event("end-task", { kind: "EndTask" /* EndTask */, exitReason });
+return exitReason;
 };
 
 // src/workflow/dynamic.ts
-import { parse } from "yaml";
-import { z as
+import { parse as parse2 } from "yaml";
+import { z as z21 } from "zod";
 
 // src/workflow/dynamic-types.ts
-import { z as
-var WorkflowInputDefinitionSchema =
-  id:
-  description:
-  default:
+import { z as z20 } from "zod";
+var WorkflowInputDefinitionSchema = z20.object({
+  id: z20.string(),
+  description: z20.string().nullish(),
+  default: z20.any().nullish()
 });
-var WorkflowStepDefinitionSchema =
-  id:
-  tools:
-  task:
-  output:
-  expected_outcome:
-  /**
-   * Persisted JavaScript/TypeScript (JS-compatible) async function body.
-   * The code is wrapped as: `async (ctx) => { <code> }`.
-   */
-  code: z16.string().nullish(),
+var WorkflowStepDefinitionSchema = z20.object({
+  id: z20.string(),
+  tools: z20.array(z20.string()).nullish(),
+  task: z20.string(),
+  output: z20.string().nullish(),
+  expected_outcome: z20.string().nullish(),
   /**
    * Optional JSON schema or other metadata for future structured outputs.
    * Not interpreted by core today.
   */
-  outputSchema:
+  outputSchema: z20.any().nullish(),
|
1668
2348
|
/**
|
|
1669
2349
|
* Optional timeout in milliseconds. Step execution will be aborted if it exceeds this duration.
|
|
1670
2350
|
*/
|
|
1671
|
-
timeout:
|
|
2351
|
+
timeout: z20.number().positive().nullish()
|
|
2352
|
+
});
|
|
2353
|
+
var WhileLoopStepSchema = z20.object({
|
|
2354
|
+
id: z20.string(),
|
|
2355
|
+
while: z20.object({
|
|
2356
|
+
condition: z20.string().describe("JavaScript expression that evaluates to true/false"),
|
|
2357
|
+
steps: z20.array(z20.lazy(() => WorkflowControlFlowStepSchema))
|
|
2358
|
+
}),
|
|
2359
|
+
output: z20.string().nullish()
|
|
1672
2360
|
});
|
|
1673
|
-
var
|
|
1674
|
-
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
2361
|
+
var IfElseStepSchema = z20.object({
|
|
2362
|
+
id: z20.string(),
|
|
2363
|
+
if: z20.object({
|
|
2364
|
+
condition: z20.string().describe("JavaScript expression that evaluates to true/false"),
|
|
2365
|
+
thenBranch: z20.array(z20.lazy(() => WorkflowControlFlowStepSchema)),
|
|
2366
|
+
elseBranch: z20.array(z20.lazy(() => WorkflowControlFlowStepSchema)).optional()
|
|
2367
|
+
}),
|
|
2368
|
+
output: z20.string().nullish()
|
|
1678
2369
|
});
|
|
1679
|
-
var
|
|
1680
|
-
|
|
2370
|
+
var BreakStepSchema = z20.object({
|
|
2371
|
+
break: z20.literal(true)
|
|
2372
|
+
});
|
|
2373
|
+
var ContinueStepSchema = z20.object({
|
|
2374
|
+
continue: z20.literal(true)
|
|
2375
|
+
});
|
|
2376
|
+
var TryCatchStepSchema = z20.object({
|
|
2377
|
+
id: z20.string(),
|
|
2378
|
+
try: z20.object({
|
|
2379
|
+
trySteps: z20.array(z20.lazy(() => WorkflowControlFlowStepSchema)),
|
|
2380
|
+
catchSteps: z20.array(z20.lazy(() => WorkflowControlFlowStepSchema))
|
|
2381
|
+
}),
|
|
2382
|
+
output: z20.string().nullish()
|
|
2383
|
+
});
|
|
2384
|
+
var WorkflowControlFlowStepSchema = z20.union([
|
|
2385
|
+
WorkflowStepDefinitionSchema,
|
|
2386
|
+
WhileLoopStepSchema,
|
|
2387
|
+
IfElseStepSchema,
|
|
2388
|
+
BreakStepSchema,
|
|
2389
|
+
ContinueStepSchema,
|
|
2390
|
+
TryCatchStepSchema
|
|
2391
|
+
]);
|
|
2392
|
+
var WorkflowDefinitionSchema = z20.object({
|
|
2393
|
+
task: z20.string(),
|
|
2394
|
+
inputs: z20.array(WorkflowInputDefinitionSchema).nullish(),
|
|
2395
|
+
steps: z20.array(WorkflowControlFlowStepSchema),
|
|
2396
|
+
output: z20.string().nullish()
|
|
2397
|
+
});
|
|
2398
|
+
var WorkflowFileSchema = z20.object({
|
|
2399
|
+
workflows: z20.record(z20.string(), WorkflowDefinitionSchema)
|
|
1681
2400
|
});
|
|
1682
2401
|
|
|
1683
2402
|
// src/workflow/dynamic.ts
|
|
2403
|
+
var MAX_WHILE_LOOP_ITERATIONS = 1e3;
|
|
2404
|
+
function convertJsonSchemaToZod(schema) {
|
|
2405
|
+
if (schema.enum) {
|
|
2406
|
+
return z21.enum(schema.enum.map((v) => String(v)));
|
|
2407
|
+
}
|
|
2408
|
+
if (Array.isArray(schema.type)) {
|
|
2409
|
+
const types = schema.type;
|
|
2410
|
+
if (types.includes("null") && types.length === 2) {
|
|
2411
|
+
const nonNullType = types.find((t) => t !== "null");
|
|
2412
|
+
if (nonNullType === "string") return z21.string().nullable();
|
|
2413
|
+
if (nonNullType === "number") return z21.number().nullable();
|
|
2414
|
+
if (nonNullType === "integer")
|
|
2415
|
+
return z21.number().refine((val) => Number.isInteger(val)).nullable();
|
|
2416
|
+
if (nonNullType === "boolean") return z21.boolean().nullable();
|
|
2417
|
+
if (nonNullType === "object") {
|
|
2418
|
+
const shape = {};
|
|
2419
|
+
if (schema.properties) {
|
|
2420
|
+
for (const [propName, propSchema] of Object.entries(schema.properties)) {
|
|
2421
|
+
const propZod = convertJsonSchemaToZod(propSchema);
|
|
2422
|
+
const isRequired = schema.required?.includes(propName);
|
|
2423
|
+
shape[propName] = isRequired ? propZod : propZod.optional();
|
|
2424
|
+
}
|
|
2425
|
+
}
|
|
2426
|
+
return z21.object(shape).nullable();
|
|
2427
|
+
}
|
|
2428
|
+
if (nonNullType === "array") return z21.array(z21.any()).nullable();
|
|
2429
|
+
}
|
|
2430
|
+
return z21.any();
|
|
2431
|
+
}
|
|
2432
|
+
const type = schema.type;
|
|
2433
|
+
switch (type) {
|
|
2434
|
+
case "string":
|
|
2435
|
+
return z21.string();
|
|
2436
|
+
case "number":
|
|
2437
|
+
return z21.number();
|
|
2438
|
+
case "integer":
|
|
2439
|
+
return z21.number().refine((val) => Number.isInteger(val), { message: "Expected an integer" });
|
|
2440
|
+
case "boolean":
|
|
2441
|
+
return z21.boolean();
|
|
2442
|
+
case "null":
|
|
2443
|
+
return z21.null();
|
|
2444
|
+
case "object": {
|
|
2445
|
+
const shape = {};
|
|
2446
|
+
if (schema.properties) {
|
|
2447
|
+
for (const [propName, propSchema] of Object.entries(schema.properties)) {
|
|
2448
|
+
const propZod = convertJsonSchemaToZod(propSchema);
|
|
2449
|
+
const isRequired = schema.required?.includes(propName);
|
|
2450
|
+
shape[propName] = isRequired ? propZod : propZod.optional();
|
|
2451
|
+
}
|
|
2452
|
+
}
|
|
2453
|
+
let objectSchema = z21.object(shape);
|
|
2454
|
+
if (schema.additionalProperties === true) {
|
|
2455
|
+
objectSchema = objectSchema.and(z21.any());
|
|
2456
|
+
} else if (typeof schema.additionalProperties === "object") {
|
|
2457
|
+
const additionalSchema = convertJsonSchemaToZod(schema.additionalProperties);
|
|
2458
|
+
objectSchema = objectSchema.and(z21.record(z21.string(), additionalSchema));
|
|
2459
|
+
}
|
|
2460
|
+
return objectSchema;
|
|
2461
|
+
}
|
|
2462
|
+
case "array": {
|
|
2463
|
+
if (!schema.items) {
|
|
2464
|
+
return z21.array(z21.any());
|
|
2465
|
+
}
|
|
2466
|
+
const itemSchema = convertJsonSchemaToZod(schema.items);
|
|
2467
|
+
return z21.array(itemSchema);
|
|
2468
|
+
}
|
|
2469
|
+
default:
|
|
2470
|
+
return z21.any();
|
|
2471
|
+
}
|
|
2472
|
+
}
|
|
1684
2473
|
var TOOL_GROUPS = {
|
|
1685
2474
|
readonly: ["readFile", "readBinaryFile", "listFiles", "searchFiles"],
|
|
1686
2475
|
readwrite: ["readFile", "readBinaryFile", "listFiles", "searchFiles", "writeToFile", "replaceInFile", "removeFile", "renameFile"],
|
|
1687
2476
|
internet: ["fetchUrl", "search"]
|
|
1688
2477
|
};
|
|
2478
|
+
function validateWorkflowFile(definition) {
|
|
2479
|
+
const errors = [];
|
|
2480
|
+
for (const [workflowId, workflow] of Object.entries(definition.workflows)) {
|
|
2481
|
+
if (!workflow.steps || workflow.steps.length === 0) {
|
|
2482
|
+
errors.push(`Workflow '${workflowId}' has no steps`);
|
|
2483
|
+
continue;
|
|
2484
|
+
}
|
|
2485
|
+
const checkBreakOutsideLoop = (steps, inLoop, path) => {
|
|
2486
|
+
for (const step of steps) {
|
|
2487
|
+
if (isBreakStep(step) || isContinueStep(step)) {
|
|
2488
|
+
if (!inLoop) {
|
|
2489
|
+
errors.push(`${path} has break/continue outside of a loop`);
|
|
2490
|
+
}
|
|
2491
|
+
}
|
|
2492
|
+
if (isWhileLoopStep(step)) {
|
|
2493
|
+
checkBreakOutsideLoop(step.while.steps, true, `${path}/${step.id}`);
|
|
2494
|
+
}
|
|
2495
|
+
if (isIfElseStep(step)) {
|
|
2496
|
+
if (step.if.thenBranch) {
|
|
2497
|
+
checkBreakOutsideLoop(step.if.thenBranch, inLoop, `${path}/${step.id}/then`);
|
|
2498
|
+
}
|
|
2499
|
+
if (step.if.elseBranch) {
|
|
2500
|
+
checkBreakOutsideLoop(step.if.elseBranch, inLoop, `${path}/${step.id}/else`);
|
|
2501
|
+
}
|
|
2502
|
+
}
|
|
2503
|
+
if (isTryCatchStep(step)) {
|
|
2504
|
+
checkBreakOutsideLoop(step.try.trySteps, inLoop, `${path}/${step.id}/try`);
|
|
2505
|
+
checkBreakOutsideLoop(step.try.catchSteps, inLoop, `${path}/${step.id}/catch`);
|
|
2506
|
+
}
|
|
2507
|
+
}
|
|
2508
|
+
};
|
|
2509
|
+
checkBreakOutsideLoop(workflow.steps, false, workflowId);
|
|
2510
|
+
const findRunWorkflowCalls = (steps, path) => {
|
|
2511
|
+
for (const step of steps) {
|
|
2512
|
+
if (isWhileLoopStep(step)) {
|
|
2513
|
+
findRunWorkflowCalls(step.while.steps, `${path}/${step.id}`);
|
|
2514
|
+
}
|
|
2515
|
+
if (isIfElseStep(step)) {
|
|
2516
|
+
if (step.if.thenBranch) {
|
|
2517
|
+
findRunWorkflowCalls(step.if.thenBranch, `${path}/${step.id}/then`);
|
|
2518
|
+
}
|
|
2519
|
+
if (step.if.elseBranch) {
|
|
2520
|
+
findRunWorkflowCalls(step.if.elseBranch, `${path}/${step.id}/else`);
|
|
2521
|
+
}
|
|
2522
|
+
}
|
|
2523
|
+
if (isTryCatchStep(step)) {
|
|
2524
|
+
findRunWorkflowCalls(step.try.trySteps, `${path}/${step.id}/try`);
|
|
2525
|
+
findRunWorkflowCalls(step.try.catchSteps, `${path}/${step.id}/catch`);
|
|
2526
|
+
}
|
|
2527
|
+
}
|
|
2528
|
+
};
|
|
2529
|
+
findRunWorkflowCalls(workflow.steps, workflowId);
|
|
2530
|
+
}
|
|
2531
|
+
if (errors.length > 0) {
|
|
2532
|
+
return { success: false, errors };
|
|
2533
|
+
}
|
|
2534
|
+
return { success: true };
|
|
2535
|
+
}
|
|
1689
2536
|
function parseDynamicWorkflowDefinition(source) {
|
|
1690
2537
|
try {
|
|
1691
|
-
const raw =
|
|
2538
|
+
const raw = parse2(source);
|
|
1692
2539
|
const validated = WorkflowFileSchema.safeParse(raw);
|
|
1693
2540
|
if (!validated.success) {
|
|
1694
|
-
return { success: false, error:
|
|
2541
|
+
return { success: false, error: z21.prettifyError(validated.error) };
|
|
2542
|
+
}
|
|
2543
|
+
const validation = validateWorkflowFile(validated.data);
|
|
2544
|
+
if (!validation.success) {
|
|
2545
|
+
return { success: false, error: `Workflow validation failed:
|
|
2546
|
+
${validation.errors.map((e) => ` - ${e}`).join("\n")}` };
|
|
1695
2547
|
}
|
|
1696
2548
|
return { success: true, definition: validated.data };
|
|
1697
2549
|
} catch (error) {
|
|
1698
2550
|
return { success: false, error: error instanceof Error ? error.message : String(error) };
|
|
1699
2551
|
}
|
|
1700
2552
|
}
|
|
1701
|
-
var AsyncFunction = Object.getPrototypeOf(async () => {
|
|
1702
|
-
}).constructor;
|
|
1703
2553
|
function validateAndApplyDefaults(workflowId, workflow, input) {
|
|
1704
2554
|
if (!workflow.inputs || workflow.inputs.length === 0) {
|
|
1705
2555
|
return input;
|
|
1706
2556
|
}
|
|
1707
|
-
const validatedInput = {};
|
|
2557
|
+
const validatedInput = { ...input };
|
|
1708
2558
|
const errors = [];
|
|
1709
2559
|
for (const inputDef of workflow.inputs) {
|
|
1710
2560
|
const providedValue = input[inputDef.id];
|
|
@@ -1722,36 +2572,217 @@ ${errors.map((e) => ` - ${e}`).join("\n")}`);
|
|
|
1722
2572
|
}
|
|
1723
2573
|
return validatedInput;
|
|
1724
2574
|
}
|
|
2575
|
+
function evaluateCondition(condition, input, state, allowUnsafeCodeExecution = false) {
|
|
2576
|
+
if (allowUnsafeCodeExecution) {
|
|
2577
|
+
const functionBody = `
|
|
2578
|
+
try {
|
|
2579
|
+
return ${condition};
|
|
2580
|
+
} catch (error) {
|
|
2581
|
+
throw new Error('Condition evaluation failed: ' + (error instanceof Error ? error.message : String(error)));
|
|
2582
|
+
}
|
|
2583
|
+
`;
|
|
2584
|
+
try {
|
|
2585
|
+
const fn = new Function("input", "state", functionBody);
|
|
2586
|
+
const result = fn(input, state);
|
|
2587
|
+
return Boolean(result);
|
|
2588
|
+
} catch (error) {
|
|
2589
|
+
throw new Error(`Failed to evaluate condition: ${condition}. Error: ${error instanceof Error ? error.message : String(error)}`);
|
|
2590
|
+
}
|
|
2591
|
+
} else {
|
|
2592
|
+
return evaluateConditionSafe(condition, input, state);
|
|
2593
|
+
}
|
|
2594
|
+
}
|
|
2595
|
+
function evaluateConditionSafe(condition, input, state) {
|
|
2596
|
+
condition = condition.trim();
|
|
2597
|
+
if (condition === "true") return true;
|
|
2598
|
+
if (condition === "false") return false;
|
|
2599
|
+
const orIndex = findTopLevelOperator(condition, "||");
|
|
2600
|
+
if (orIndex !== -1) {
|
|
2601
|
+
const left = condition.slice(0, orIndex).trim();
|
|
2602
|
+
const right = condition.slice(orIndex + 2).trim();
|
|
2603
|
+
return evaluateConditionSafe(left, input, state) || evaluateConditionSafe(right, input, state);
|
|
2604
|
+
}
|
|
2605
|
+
const andIndex = findTopLevelOperator(condition, "&&");
|
|
2606
|
+
if (andIndex !== -1) {
|
|
2607
|
+
const left = condition.slice(0, andIndex).trim();
|
|
2608
|
+
const right = condition.slice(andIndex + 2).trim();
|
|
2609
|
+
return evaluateConditionSafe(left, input, state) && evaluateConditionSafe(right, input, state);
|
|
2610
|
+
}
|
|
2611
|
+
const comparisonOps = ["===", "!==", "==", "!=", ">=", "<=", ">", "<"];
|
|
2612
|
+
for (const op of comparisonOps) {
|
|
2613
|
+
const opIndex = findTopLevelOperator(condition, op);
|
|
2614
|
+
if (opIndex !== -1) {
|
|
2615
|
+
const left = evaluateValue(condition.slice(0, opIndex).trim(), input, state);
|
|
2616
|
+
const right = evaluateValue(condition.slice(opIndex + op.length).trim(), input, state);
|
|
2617
|
+
return compareValues(left, right, op);
|
|
2618
|
+
}
|
|
2619
|
+
}
|
|
2620
|
+
if (condition.startsWith("!")) {
|
|
2621
|
+
return !evaluateConditionSafe(condition.slice(1).trim(), input, state);
|
|
2622
|
+
}
|
|
2623
|
+
if (hasEnclosingParens(condition)) {
|
|
2624
|
+
const inner = condition.slice(1, -1);
|
|
2625
|
+
return evaluateConditionSafe(inner, input, state);
|
|
2626
|
+
}
|
|
2627
|
+
const value = evaluateValue(condition, input, state);
|
|
2628
|
+
return Boolean(value);
|
|
2629
|
+
}
|
|
2630
|
+
function findTopLevelOperator(expr, op) {
|
|
2631
|
+
let parenDepth = 0;
|
|
2632
|
+
let inString = false;
|
|
2633
|
+
let stringChar = "";
|
|
2634
|
+
let escapeNext = false;
|
|
2635
|
+
for (let i = 0; i <= expr.length - op.length; i++) {
|
|
2636
|
+
const char = expr[i];
|
|
2637
|
+
if (escapeNext) {
|
|
2638
|
+
escapeNext = false;
|
|
2639
|
+
continue;
|
|
2640
|
+
}
|
|
2641
|
+
if (char === "\\") {
|
|
2642
|
+
escapeNext = true;
|
|
2643
|
+
continue;
|
|
2644
|
+
}
|
|
2645
|
+
if (!inString && (char === '"' || char === "'")) {
|
|
2646
|
+
inString = true;
|
|
2647
|
+
stringChar = char;
|
|
2648
|
+
continue;
|
|
2649
|
+
}
|
|
2650
|
+
if (inString && char === stringChar) {
|
|
2651
|
+
inString = false;
|
|
2652
|
+
stringChar = "";
|
|
2653
|
+
continue;
|
|
2654
|
+
}
|
|
2655
|
+
if (inString) continue;
|
|
2656
|
+
if (char === "(") parenDepth++;
|
|
2657
|
+
if (char === ")") parenDepth--;
|
|
2658
|
+
if (parenDepth === 0 && expr.slice(i, i + op.length) === op) {
|
|
2659
|
+
return i;
|
|
2660
|
+
}
|
|
2661
|
+
}
|
|
2662
|
+
return -1;
|
|
2663
|
+
}
|
|
2664
|
+
function hasEnclosingParens(expr) {
|
|
2665
|
+
expr = expr.trim();
|
|
2666
|
+
if (!expr.startsWith("(") || !expr.endsWith(")")) {
|
|
2667
|
+
return false;
|
|
2668
|
+
}
|
|
2669
|
+
let depth = 0;
|
|
2670
|
+
let inString = false;
|
|
2671
|
+
let stringChar = "";
|
|
2672
|
+
let escapeNext = false;
|
|
2673
|
+
for (let i = 0; i < expr.length; i++) {
|
|
2674
|
+
const char = expr[i];
|
|
2675
|
+
if (escapeNext) {
|
|
2676
|
+
escapeNext = false;
|
|
2677
|
+
continue;
|
|
2678
|
+
}
|
|
2679
|
+
if (char === "\\") {
|
|
2680
|
+
escapeNext = true;
|
|
2681
|
+
continue;
|
|
2682
|
+
}
|
|
2683
|
+
if (!inString && (char === '"' || char === "'")) {
|
|
2684
|
+
inString = true;
|
|
2685
|
+
stringChar = char;
|
|
2686
|
+
continue;
|
|
2687
|
+
}
|
|
2688
|
+
if (inString && char === stringChar) {
|
|
2689
|
+
inString = false;
|
|
2690
|
+
stringChar = "";
|
|
2691
|
+
continue;
|
|
2692
|
+
}
|
|
2693
|
+
if (inString) continue;
|
|
2694
|
+
if (char === "(") {
|
|
2695
|
+
depth++;
|
|
2696
|
+
if (i === 0) depth = 1;
|
|
2697
|
+
}
|
|
2698
|
+
if (char === ")") {
|
|
2699
|
+
depth--;
|
|
2700
|
+
if (depth === 0 && i === expr.length - 1) {
|
|
2701
|
+
return true;
|
|
2702
|
+
}
|
|
2703
|
+
if (depth === 0 && i < expr.length - 1) {
|
|
2704
|
+
return false;
|
|
2705
|
+
}
|
|
2706
|
+
}
|
|
2707
|
+
}
|
|
2708
|
+
return false;
|
|
2709
|
+
}
|
|
2710
|
+
function evaluateValue(expr, input, state) {
|
|
2711
|
+
expr = expr.trim();
|
|
2712
|
+
const stringMatch = expr.match(/^(["'])(?:(?=(\\?))\2.)*?\1$/);
|
|
2713
|
+
if (stringMatch) {
|
|
2714
|
+
const quote = stringMatch[1];
|
|
2715
|
+
if (quote === '"') {
|
|
2716
|
+
try {
|
|
2717
|
+
return JSON.parse(expr);
|
|
2718
|
+
} catch (error) {
|
|
2719
|
+
throw new Error(`Invalid string literal: "${expr}". Error: ${error instanceof Error ? error.message : String(error)}`);
|
|
2720
|
+
}
|
|
2721
|
+
} else {
|
|
2722
|
+
let inner = expr.slice(1, -1);
|
|
2723
|
+
inner = inner.replace(/\\'/g, "'");
|
|
2724
|
+
inner = inner.replace(/\\"/g, '"');
|
|
2725
|
+
const converted = `"${inner.replace(/\\/g, "\\\\").replace(/"/g, '\\"')}"`;
|
|
2726
|
+
try {
|
|
2727
|
+
return JSON.parse(converted);
|
|
2728
|
+
} catch (error) {
|
|
2729
|
+
throw new Error(`Invalid string literal: "${expr}". Error: ${error instanceof Error ? error.message : String(error)}`);
|
|
2730
|
+
}
|
|
2731
|
+
}
|
|
2732
|
+
}
|
|
2733
|
+
if (/^-?\d*\.?\d+(?:[eE][+-]?\d+)?$/.test(expr)) {
|
|
2734
|
+
return Number.parseFloat(expr);
|
|
2735
|
+
}
|
|
2736
|
+
if (expr === "true") return true;
|
|
2737
|
+
if (expr === "false") return false;
|
|
2738
|
+
if (expr === "null") return null;
|
|
2739
|
+
if (expr.startsWith("input.")) {
|
|
2740
|
+
return getNestedProperty(input, expr.slice(6));
|
|
2741
|
+
}
|
|
2742
|
+
if (expr.startsWith("state.")) {
|
|
2743
|
+
return getNestedProperty(state, expr.slice(6));
|
|
2744
|
+
}
|
|
2745
|
+
throw new Error(
|
|
2746
|
+
`Unrecognized expression in condition: "${expr}". Valid expressions are: string literals, numbers, boolean literals, null, or property access like "input.foo" or "state.bar"`
|
|
2747
|
+
);
|
|
2748
|
+
}
|
|
2749
|
+
function getNestedProperty(obj, path) {
|
|
2750
|
+
const parts = path.split(".");
|
|
2751
|
+
let current = obj;
|
|
2752
|
+
for (const part of parts) {
|
|
2753
|
+
if (current == null) return void 0;
|
|
2754
|
+
current = current[part];
|
|
2755
|
+
}
|
|
2756
|
+
return current;
|
|
2757
|
+
}
|
|
2758
|
+
function compareValues(left, right, op) {
|
|
2759
|
+
switch (op) {
|
|
2760
|
+
case "===":
|
|
2761
|
+
return left === right;
|
|
2762
|
+
case "!==":
|
|
2763
|
+
return left !== right;
|
|
2764
|
+
case "==":
|
|
2765
|
+
return Object.is(left, right);
|
|
2766
|
+
case "!=":
|
|
2767
|
+
return !Object.is(left, right);
|
|
2768
|
+
case ">=":
|
|
2769
|
+
return left >= right;
|
|
2770
|
+
case "<=":
|
|
2771
|
+
return left <= right;
|
|
2772
|
+
case ">":
|
|
2773
|
+
return left > right;
|
|
2774
|
+
case "<":
|
|
2775
|
+
return left < right;
|
|
2776
|
+
default:
|
|
2777
|
+
throw new Error(`Unknown comparison operator: ${op}`);
|
|
2778
|
+
}
|
|
2779
|
+
}
|
|
1725
2780
|
function createRunWorkflowFn(args) {
|
|
1726
2781
|
return async (subWorkflowId, subInput) => {
|
|
1727
2782
|
const mergedInput = { ...args.input, ...args.state, ...subInput ?? {} };
|
|
1728
2783
|
return await args.runInternal(subWorkflowId, mergedInput, args.context, args.state);
|
|
1729
2784
|
};
|
|
1730
2785
|
}
|
|
1731
|
-
function compileStep(stepDef, workflowId, compiledSteps) {
|
|
1732
|
-
const key = `${workflowId}.${stepDef.id}`;
|
|
1733
|
-
const existing = compiledSteps.get(key);
|
|
1734
|
-
if (existing) {
|
|
1735
|
-
return existing;
|
|
1736
|
-
}
|
|
1737
|
-
if (!stepDef.code) {
|
|
1738
|
-
throw new Error(`Step '${stepDef.id}' in workflow '${workflowId}' has no code`);
|
|
1739
|
-
}
|
|
1740
|
-
try {
|
|
1741
|
-
const fn = new AsyncFunction("ctx", stepDef.code);
|
|
1742
|
-
compiledSteps.set(key, fn);
|
|
1743
|
-
return fn;
|
|
1744
|
-
} catch (error) {
|
|
1745
|
-
const errorMsg = error instanceof Error ? error.message : String(error);
|
|
1746
|
-
const codePreview = stepDef.code.length > 200 ? `${stepDef.code.substring(0, 200)}...` : stepDef.code;
|
|
1747
|
-
throw new Error(
|
|
1748
|
-
`Failed to compile code for step '${stepDef.id}' in workflow '${workflowId}':
|
|
1749
|
-
Error: ${errorMsg}
|
|
1750
|
-
Code:
|
|
1751
|
-
${codePreview.split("\n").map((line) => ` ${line}`).join("\n")}`
|
|
1752
|
-
);
|
|
1753
|
-
}
|
|
1754
|
-
}
|
|
1755
2786
|
async function executeStepWithAgent(stepDef, workflowId, input, state, context, options, runInternal) {
|
|
1756
2787
|
const tools = context.tools;
|
|
1757
2788
|
if (typeof tools.generateText !== "function" || typeof tools.invokeTool !== "function" || typeof tools.taskEvent !== "function") {
|
|
@@ -1794,9 +2825,9 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
|
|
|
1794
2825
|
toolsForAgent.push({
|
|
1795
2826
|
name: "runWorkflow",
|
|
1796
2827
|
description: "Run a named sub-workflow defined in the current workflow file.",
|
|
1797
|
-
parameters:
|
|
1798
|
-
workflowId:
|
|
1799
|
-
input:
|
|
2828
|
+
parameters: z21.object({
|
|
2829
|
+
workflowId: z21.string().describe("Sub-workflow id to run"),
|
|
2830
|
+
input: z21.any().nullish().describe("Optional input object for the sub-workflow")
|
|
1800
2831
|
}),
|
|
1801
2832
|
handler: async () => {
|
|
1802
2833
|
return { success: false, message: { type: "error-text", value: "runWorkflow is virtual." } };
|
|
@@ -1886,38 +2917,11 @@ async function executeStepWithAgent(stepDef, workflowId, input, state, context,
|
|
|
1886
2917
|
if (result.type === "UsageExceeded") {
|
|
1887
2918
|
throw new Error(`Agent step '${stepDef.id}' in workflow '${workflowId}' exceeded usage limits (tokens or rounds)`);
|
|
1888
2919
|
}
|
|
1889
|
-
|
|
2920
|
+
const _exhaustiveCheck = result;
|
|
2921
|
+
throw new Error(`Agent step '${stepDef.id}' in workflow '${workflowId}' exited unexpectedly with unhandled type`);
|
|
1890
2922
|
}
|
|
1891
|
-
async function executeStepWithTimeout(stepDef, workflowId, input, state, context, options,
|
|
2923
|
+
async function executeStepWithTimeout(stepDef, workflowId, input, state, context, options, runInternal) {
|
|
1892
2924
|
const executeStepLogic = async () => {
|
|
1893
|
-
if (stepDef.code && options.allowUnsafeCodeExecution) {
|
|
1894
|
-
context.logger.debug(`[Step] Executing step '${stepDef.id}' with compiled code`);
|
|
1895
|
-
const fn = compileStep(stepDef, workflowId, compiledSteps);
|
|
1896
|
-
const runWorkflow = createRunWorkflowFn({ input, state, context, runInternal });
|
|
1897
|
-
const agentTools = {};
|
|
1898
|
-
if (options.toolInfo) {
|
|
1899
|
-
for (const tool of options.toolInfo) {
|
|
1900
|
-
if (typeof context.tools[tool.name] === "function") {
|
|
1901
|
-
agentTools[tool.name] = context.tools[tool.name];
|
|
1902
|
-
}
|
|
1903
|
-
}
|
|
1904
|
-
}
|
|
1905
|
-
const runtimeCtx = {
|
|
1906
|
-
workflowId,
|
|
1907
|
-
stepId: stepDef.id,
|
|
1908
|
-
input,
|
|
1909
|
-
state,
|
|
1910
|
-
tools: context.tools,
|
|
1911
|
-
logger: context.logger,
|
|
1912
|
-
step: context.step,
|
|
1913
|
-
runWorkflow,
|
|
1914
|
-
toolInfo: options.toolInfo,
|
|
1915
|
-
agentTools
|
|
1916
|
-
};
|
|
1917
|
-
const result2 = await fn(runtimeCtx);
|
|
1918
|
-
context.logger.debug(`[Step] Compiled code execution completed for step '${stepDef.id}'`);
|
|
1919
|
-
return result2;
|
|
1920
|
-
}
|
|
1921
2925
|
context.logger.debug(`[Step] Executing step '${stepDef.id}' with agent`);
|
|
1922
2926
|
const result = await executeStepWithAgent(stepDef, workflowId, input, state, context, options, runInternal);
|
|
1923
2927
|
context.logger.debug(`[Step] Agent execution completed for step '${stepDef.id}'`);
|
|
@@ -1940,22 +2944,19 @@ async function executeStepWithTimeout(stepDef, workflowId, input, state, context
|
|
|
1940
2944
|
}
|
|
1941
2945
|
return await executeStepLogic();
|
|
1942
2946
|
}
|
|
1943
|
-
async function executeStep(stepDef, workflowId, input, state, context, options,
|
|
1944
|
-
const result = await executeStepWithTimeout(stepDef, workflowId, input, state, context, options,
|
|
2947
|
+
async function executeStep(stepDef, workflowId, input, state, context, options, runInternal) {
|
|
2948
|
+
const result = await executeStepWithTimeout(stepDef, workflowId, input, state, context, options, runInternal);
|
|
1945
2949
|
if (stepDef.outputSchema) {
|
|
1946
2950
|
try {
|
|
1947
|
-
|
|
1948
|
-
|
|
1949
|
-
|
|
1950
|
-
|
|
1951
|
-
|
|
1952
|
-
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
throw new Error(`Expected array output, got ${typeof result}`);
|
|
1957
|
-
}
|
|
1958
|
-
}
|
|
2951
|
+
context.logger.debug(`[Step] Validating output for step '${stepDef.id}' against schema`);
|
|
2952
|
+
const zodSchema = convertJsonSchemaToZod(stepDef.outputSchema);
|
|
2953
|
+
const validationResult = zodSchema.safeParse(result);
|
|
2954
|
+
if (!validationResult.success) {
|
|
2955
|
+
const errorDetails = validationResult.error.issues.map((e) => ` - ${e.path.join(".") || "root"}: ${e.message}`).join("\n");
|
|
2956
|
+
throw new Error(`Output does not match expected schema:
|
|
2957
|
+
${errorDetails}`);
|
|
2958
|
+
}
|
|
2959
|
+
context.logger.debug(`[Step] Output validation successful for step '${stepDef.id}'`);
|
|
1959
2960
|
} catch (error) {
|
|
1960
2961
|
throw new Error(
|
|
1961
2962
|
`Step '${stepDef.id}' in workflow '${workflowId}' output validation failed: ${error instanceof Error ? error.message : String(error)}`
|
|
@@ -1964,6 +2965,204 @@ async function executeStep(stepDef, workflowId, input, state, context, options,
|
|
|
1964
2965
|
}
|
|
1965
2966
|
return result;
|
|
1966
2967
|
}
|
|
2968
|
+
function isBreakStep(step) {
|
|
2969
|
+
return typeof step === "object" && step !== null && "break" in step && step.break === true;
|
|
2970
|
+
}
|
|
2971
|
+
function isContinueStep(step) {
|
|
2972
|
+
return typeof step === "object" && step !== null && "continue" in step && step.continue === true;
|
|
2973
|
+
}
|
|
2974
|
+
function isWhileLoopStep(step) {
|
|
2975
|
+
return typeof step === "object" && step !== null && "while" in step;
|
|
2976
|
+
}
|
|
2977
|
+
function isIfElseStep(step) {
|
|
2978
|
+
return typeof step === "object" && step !== null && "if" in step;
|
|
2979
|
+
}
|
|
2980
|
+
function isTryCatchStep(step) {
|
|
2981
|
+
return typeof step === "object" && step !== null && "try" in step;
|
|
2982
|
+
}
|
|
2983
|
+
function storeStepOutput(step, result, state) {
|
|
2984
|
+
if ("id" in step && step.output) {
|
|
2985
|
+
const outputKey = step.output;
|
|
2986
|
+
state[outputKey] = result;
|
|
2987
|
+
}
|
|
2988
|
+
}
|
|
2989
|
+
function getStepId(step) {
|
|
2990
|
+
if ("id" in step && step.id) {
|
|
2991
|
+
return step.id;
|
|
2992
|
+
}
|
|
2993
|
+
if (isWhileLoopStep(step)) {
|
|
2994
|
+
return "while";
|
|
2995
|
+
}
|
|
2996
|
+
if (isIfElseStep(step)) {
|
|
2997
|
+
return "if";
|
|
2998
|
+
}
|
|
2999
|
+
if (isTryCatchStep(step)) {
|
|
3000
|
+
return "try";
|
|
3001
|
+
}
|
|
3002
|
+
return "control";
|
|
3003
|
+
}
|
|
3004
|
+
async function executeControlFlowStep(step, workflowId, input, state, context, options, runInternal, loopDepth, breakFlag, continueFlag) {
|
|
3005
|
+
if (isBreakStep(step)) {
|
|
3006
|
+
if (loopDepth === 0) {
|
|
3007
|
+
throw new Error(`'break' statement found outside of a loop in workflow '${workflowId}'`);
|
|
3008
|
+
}
|
|
3009
|
+
context.logger.debug(`[ControlFlow] Executing break statement (loop depth: ${loopDepth})`);
|
|
3010
|
+
return { result: void 0, shouldBreak: true, shouldContinue: false };
|
|
3011
|
+
}
|
|
3012
|
+
if (isContinueStep(step)) {
|
|
3013
|
+
if (loopDepth === 0) {
|
|
3014
|
+
throw new Error(`'continue' statement found outside of a loop in workflow '${workflowId}'`);
|
|
3015
|
+
}
|
|
3016
|
+
context.logger.debug(`[ControlFlow] Executing continue statement (loop depth: ${loopDepth})`);
|
|
3017
|
+
return { result: void 0, shouldBreak: false, shouldContinue: true };
|
|
3018
|
+
}
|
|
3019
|
+
if (isWhileLoopStep(step)) {
|
|
3020
|
+
context.logger.info(`[ControlFlow] Executing while loop '${step.id}'`);
|
|
3021
|
+
context.logger.debug(`[ControlFlow] Condition: ${step.while.condition}`);
|
|
3022
|
+
context.logger.debug(`[ControlFlow] Loop body has ${step.while.steps.length} step(s)`);
|
|
3023
|
+
let iterationCount = 0;
|
|
3024
|
+
let loopResult;
|
|
3025
|
+
while (true) {
|
|
3026
|
+
iterationCount++;
|
|
3027
|
+
if (iterationCount > MAX_WHILE_LOOP_ITERATIONS) {
|
|
3028
|
+
throw new Error(
|
|
3029
|
+
`While loop '${step.id}' in workflow '${workflowId}' exceeded maximum iteration limit of ${MAX_WHILE_LOOP_ITERATIONS}`
|
|
3030
|
+
);
|
|
3031
|
+
}
|
|
3032
|
+
const conditionResult = evaluateCondition(step.while.condition, input, state, options.allowUnsafeCodeExecution);
|
|
3033
|
+
context.logger.debug(`[ControlFlow] While loop '${step.id}' iteration ${iterationCount}: condition = ${conditionResult}`);
|
|
3034
|
+
if (!conditionResult) {
|
|
3035
|
+
context.logger.info(`[ControlFlow] While loop '${step.id}' terminated after ${iterationCount - 1} iteration(s)`);
|
|
3036
|
+
break;
|
|
3037
|
+
}
|
|
3038
|
+
for (const bodyStep of step.while.steps) {
|
|
3039
|
+
const { result, shouldBreak, shouldContinue } = await executeControlFlowStep(
|
|
3040
|
+
bodyStep,
|
|
3041
|
+
workflowId,
|
|
3042
|
+
input,
|
|
3043
|
+
state,
|
|
3044
|
+
context,
|
|
3045
|
+
options,
|
|
3046
|
+
runInternal,
|
|
3047
|
+
loopDepth + 1,
|
|
3048
|
+
breakFlag,
|
|
3049
|
+
continueFlag
|
|
3050
|
+
);
|
|
3051
|
+
if (shouldBreak) {
|
|
3052
|
+
context.logger.debug(`[ControlFlow] Breaking from while loop '${step.id}'`);
|
|
3053
|
+
breakFlag.value = false;
|
|
3054
|
+
return { result: loopResult, shouldBreak: false, shouldContinue: false };
|
|
3055
|
+
}
|
|
3056
|
+
if (shouldContinue) {
|
|
3057
|
+
context.logger.debug(`[ControlFlow] Continuing to next iteration of while loop '${step.id}'`);
|
|
3058
|
+
continueFlag.value = false;
|
|
3059
|
+
break;
|
|
3060
|
+
}
|
|
3061
|
+
storeStepOutput(bodyStep, result, state);
|
|
3062
|
+
loopResult = result;
|
|
3063
|
+
}
|
|
3064
|
+
}
|
|
3065
|
+
const outputKey = step.output ?? step.id;
|
|
3066
|
+
state[outputKey] = loopResult;
|
|
3067
|
+
context.logger.debug(`[ControlFlow] While loop '${step.id}' stored output as '${outputKey}'`);
|
|
3068
|
+
return { result: loopResult, shouldBreak: false, shouldContinue: false };
|
|
3069
|
+
}
|
|
3070
|
+
if (isIfElseStep(step)) {
|
|
3071
|
+
const ifStep = step;
|
|
3072
|
+
context.logger.info(`[ControlFlow] Executing if/else branch '${ifStep.id}'`);
|
|
3073
|
+
context.logger.debug(`[ControlFlow] Condition: ${ifStep.if.condition}`);
|
|
3074
|
+
context.logger.debug(`[ControlFlow] Then branch has ${ifStep.if.thenBranch.length} step(s)`);
|
|
3075
|
+
if (ifStep.if.elseBranch) {
|
|
3076
|
+
context.logger.debug(`[ControlFlow] Else branch has ${ifStep.if.elseBranch.length} step(s)`);
|
|
3077
|
+
}
|
|
3078
|
+
const conditionResult = evaluateCondition(ifStep.if.condition, input, state, options.allowUnsafeCodeExecution);
|
|
3079
|
+
context.logger.debug(`[ControlFlow] If/else '${ifStep.id}' condition = ${conditionResult}`);
|
|
3080
|
+
const branchSteps = conditionResult ? ifStep.if.thenBranch : ifStep.if.elseBranch ?? [];
|
|
3081
|
+
const branchName = conditionResult ? "then" : ifStep.if.elseBranch ? "else" : "else (empty)";
|
|
3082
|
+
context.logger.info(`[ControlFlow] Taking '${branchName}' branch of '${ifStep.id}'`);
|
|
3083
|
+
let branchResult;
|
|
3084
|
+
for (const branchStep of branchSteps) {
|
|
3085
|
+
const { result, shouldBreak, shouldContinue } = await executeControlFlowStep(
|
|
3086
|
+
branchStep,
|
|
3087
|
+
workflowId,
|
|
3088
|
+
input,
|
|
3089
|
+
state,
|
|
3090
|
+
context,
|
|
3091
|
+
options,
|
|
3092
|
+
runInternal,
|
|
3093
|
+
loopDepth,
|
|
3094
|
+
breakFlag,
|
|
3095
|
+
continueFlag
|
|
3096
|
+
);
|
|
3097
|
+
if (shouldBreak || shouldContinue) {
|
|
3098
|
+
return { result, shouldBreak, shouldContinue };
|
|
3099
|
+
}
|
|
3100
|
+
storeStepOutput(branchStep, result, state);
|
|
3101
|
+
branchResult = result;
|
|
3102
|
+
}
|
|
3103
|
+
const outputKey = ifStep.output ?? ifStep.id;
|
|
3104
|
+
state[outputKey] = branchResult;
|
|
3105
|
+
context.logger.debug(`[ControlFlow] If/else '${ifStep.id}' stored output as '${outputKey}'`);
|
|
3106
|
+
return { result: branchResult, shouldBreak: false, shouldContinue: false };
|
|
3107
|
+
}
|
|
3108
|
+
if (isTryCatchStep(step)) {
|
|
3109
|
+
const tryStep = step;
|
|
3110
|
+
context.logger.info(`[ControlFlow] Executing try/catch block '${tryStep.id}'`);
|
|
3111
|
+
context.logger.debug(`[ControlFlow] Try block has ${tryStep.try.trySteps.length} step(s)`);
|
|
3112
|
+
context.logger.debug(`[ControlFlow] Catch block has ${tryStep.try.catchSteps.length} step(s)`);
|
|
3113
|
+
let tryResult;
|
|
3114
|
+
let caughtError;
|
|
3115
|
+
try {
|
|
3116
|
+
for (const tryStepItem of tryStep.try.trySteps) {
|
|
3117
|
+
const { result } = await executeControlFlowStep(
|
|
3118
|
+
tryStepItem,
|
|
3119
|
+
workflowId,
|
|
3120
|
+
input,
|
|
3121
|
+
state,
|
|
3122
|
+
context,
|
|
3123
|
+
options,
|
|
3124
|
+
runInternal,
|
|
3125
|
+
loopDepth,
|
|
3126
|
+
breakFlag,
|
|
3127
|
+
continueFlag
|
|
3128
|
+
);
|
|
3129
|
+
storeStepOutput(tryStepItem, result, state);
|
|
3130
|
+
tryResult = result;
|
|
3131
|
+
}
|
|
3132
|
+
const outputKey = tryStep.output ?? tryStep.id;
|
|
3133
|
+
state[outputKey] = tryResult;
|
|
3134
|
+
context.logger.debug(`[ControlFlow] Try/catch '${tryStep.id}' completed successfully`);
|
|
3135
|
+
return { result: tryResult, shouldBreak: false, shouldContinue: false };
|
|
3136
|
+
} catch (error) {
|
|
3137
|
+
caughtError = error instanceof Error ? error : new Error(String(error));
|
|
3138
|
+
context.logger.warn(`[ControlFlow] Try/catch '${tryStep.id}' caught error: ${caughtError.message}`);
|
|
3139
|
+
let catchResult;
|
|
3140
|
+
for (const catchStepItem of tryStep.try.catchSteps) {
|
|
3141
|
+
const { result } = await executeControlFlowStep(
|
|
3142
|
+
catchStepItem,
|
|
3143
|
+
workflowId,
|
|
3144
|
+
input,
|
|
3145
|
+
state,
|
|
3146
|
+
context,
|
|
3147
|
+
options,
|
|
3148
|
+
runInternal,
|
|
3149
|
+
loopDepth,
|
|
3150
|
+
breakFlag,
|
|
3151
|
+
continueFlag
|
|
3152
|
+
);
|
|
3153
|
+
storeStepOutput(catchStepItem, result, state);
|
|
3154
|
+
catchResult = result;
|
|
3155
|
+
}
|
|
3156
|
+
const outputKey = tryStep.output ?? tryStep.id;
|
|
3157
|
+
state[outputKey] = catchResult;
|
|
3158
|
+
context.logger.debug(`[ControlFlow] Try/catch '${tryStep.id}' caught error and executed catch block`);
|
|
3159
|
+
return { result: catchResult, shouldBreak: false, shouldContinue: false };
|
|
3160
|
+
}
|
|
3161
|
+
}
|
|
3162
|
+
const stepDef = step;
|
|
3163
|
+
const stepResult = await executeStep(stepDef, workflowId, input, state, context, options, runInternal);
|
|
3164
|
+
return { result: stepResult, shouldBreak: false, shouldContinue: false };
|
|
3165
|
+
}
|
|
1967
3166
|
function createDynamicWorkflow(definition, options = {}) {
|
|
1968
3167
|
if (typeof definition === "string") {
|
|
1969
3168
|
const res = parseDynamicWorkflowDefinition(definition);
|
|
@@ -1972,36 +3171,49 @@ function createDynamicWorkflow(definition, options = {}) {
|
|
|
1972
3171
|
}
|
|
1973
3172
|
definition = res.definition;
|
|
1974
3173
|
}
|
|
1975
|
-
const compiledSteps = /* @__PURE__ */ new Map();
|
|
1976
3174
|
const runInternal = async (workflowId, input, context, inheritedState) => {
|
|
1977
3175
|
const workflow = definition.workflows[workflowId];
|
|
1978
3176
|
if (!workflow) {
|
|
3177
|
+
const builtIn = options.builtInWorkflows?.[workflowId];
|
|
3178
|
+
if (builtIn) {
|
|
3179
|
+
context.logger.info(`[Workflow] Delegating to built-in workflow '${workflowId}'`);
|
|
3180
|
+
return await builtIn(input, context);
|
|
3181
|
+
}
|
|
1979
3182
|
throw new Error(`Workflow '${workflowId}' not found`);
|
|
1980
3183
|
}
|
|
1981
3184
|
const validatedInput = validateAndApplyDefaults(workflowId, workflow, input);
|
|
1982
3185
|
context.logger.info(`[Workflow] Starting workflow '${workflowId}'`);
|
|
1983
3186
|
context.logger.debug(`[Workflow] Input: ${JSON.stringify(validatedInput)}`);
|
|
1984
3187
|
context.logger.debug(`[Workflow] Inherited state: ${JSON.stringify(inheritedState)}`);
|
|
1985
|
-
context.logger.debug(`[Workflow] Steps: ${workflow.steps.map((s) => s.id).join(", ")}`);
|
|
3188
|
+
context.logger.debug(`[Workflow] Steps: ${workflow.steps.map((s) => "id" in s ? s.id : "<control flow>").join(", ")}`);
|
|
1986
3189
|
const state = { ...inheritedState };
|
|
1987
3190
|
let lastOutput;
|
|
3191
|
+
const breakFlag = { value: false };
|
|
3192
|
+
const continueFlag = { value: false };
|
|
1988
3193
|
for (let i = 0; i < workflow.steps.length; i++) {
|
|
1989
3194
|
const stepDef = workflow.steps[i];
|
|
1990
|
-
const
|
|
1991
|
-
context.logger.info(`[Workflow] Step ${i + 1}/${workflow.steps.length}: ${
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
1997
|
-
|
|
1998
|
-
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
|
|
2003
|
-
|
|
3195
|
+
const stepId = getStepId(stepDef);
|
|
3196
|
+
context.logger.info(`[Workflow] Step ${i + 1}/${workflow.steps.length}: ${stepId}`);
|
|
3197
|
+
const { result } = await executeControlFlowStep(
|
|
3198
|
+
stepDef,
|
|
3199
|
+
workflowId,
|
|
3200
|
+
validatedInput,
|
|
3201
|
+
state,
|
|
3202
|
+
context,
|
|
3203
|
+
options,
|
|
3204
|
+
runInternal,
|
|
3205
|
+
0,
|
|
3206
|
+
// loop depth
|
|
3207
|
+
breakFlag,
|
|
3208
|
+
continueFlag
|
|
2004
3209
|
);
|
|
3210
|
+
lastOutput = result;
|
|
3211
|
+
storeStepOutput(stepDef, result, state);
|
|
3212
|
+
if ("id" in stepDef && stepDef.output) {
|
|
3213
|
+
context.logger.debug(
|
|
3214
|
+
`[Workflow] Step output stored as '${stepDef.output}': ${typeof lastOutput === "object" ? JSON.stringify(lastOutput).substring(0, 200) : lastOutput}`
|
|
3215
|
+
);
|
|
3216
|
+
}
|
|
2005
3217
|
}
|
|
2006
3218
|
context.logger.info(`[Workflow] Completed workflow '${workflowId}'`);
|
|
2007
3219
|
if (workflow.output) {
|
|
@@ -2016,725 +3228,6 @@ function createDynamicWorkflow(definition, options = {}) {
|
|
|
2016
3228
|
};
|
|
2017
3229
|
}
|
|
2018
3230
|
|
|
2019
|
-
// src/workflow/dynamic-generator.workflow.ts
|
|
2020
|
-
import { z as z18 } from "zod";
|
|
2021
|
-
|
|
2022
|
-
// src/workflow/prompts/dynamic-generator-prompts.ts
|
|
2023
|
-
var RUNTIME_CONTEXT_TYPES = `## Runtime context (ctx)
|
|
2024
|
-
\`\`\`ts
|
|
2025
|
-
// Runtime types (for reference)
|
|
2026
|
-
type Logger = {
|
|
2027
|
-
debug: (...args: any[]) => void
|
|
2028
|
-
info: (...args: any[]) => void
|
|
2029
|
-
warn: (...args: any[]) => void
|
|
2030
|
-
error: (...args: any[]) => void
|
|
2031
|
-
}
|
|
2032
|
-
|
|
2033
|
-
type StepFn = {
|
|
2034
|
-
<T>(name: string, fn: () => Promise<T>): Promise<T>
|
|
2035
|
-
<T>(name: string, options: { retry?: number }, fn: () => Promise<T>): Promise<T>
|
|
2036
|
-
}
|
|
2037
|
-
|
|
2038
|
-
type JsonModelMessage = { role: 'system' | 'user' | 'assistant' | 'tool'; content: any }
|
|
2039
|
-
type JsonResponseMessage = { role: 'assistant' | 'tool'; content: any }
|
|
2040
|
-
type ToolSet = Record<string, any>
|
|
2041
|
-
|
|
2042
|
-
type ToolResponseResult =
|
|
2043
|
-
| { type: 'text'; value: string }
|
|
2044
|
-
| { type: 'json'; value: any }
|
|
2045
|
-
| { type: 'error-text'; value: string }
|
|
2046
|
-
| { type: 'error-json'; value: any }
|
|
2047
|
-
| { type: 'content'; value: any[] }
|
|
2048
|
-
|
|
2049
|
-
type ToolResponse =
|
|
2050
|
-
| { type: 'Reply'; message: ToolResponseResult }
|
|
2051
|
-
| { type: 'Exit'; message: string; object?: any }
|
|
2052
|
-
| { type: 'Error'; message: ToolResponseResult }
|
|
2053
|
-
|
|
2054
|
-
type ExitReason =
|
|
2055
|
-
| { type: 'UsageExceeded' }
|
|
2056
|
-
| { type: 'Exit'; message: string; object?: any }
|
|
2057
|
-
| { type: 'Error'; error: { message: string; stack?: string } }
|
|
2058
|
-
|
|
2059
|
-
type FullToolInfo = { name: string; description: string; parameters: any; handler: any }
|
|
2060
|
-
|
|
2061
|
-
type AgentTools = {
|
|
2062
|
-
readFile: (input: { path: string }) => Promise<string | null>
|
|
2063
|
-
writeToFile: (input: { path: string; content: string }) => Promise<void>
|
|
2064
|
-
executeCommand: (input: { command: string; pipe?: boolean; requiresApproval?: boolean } & ({ args: string[]; shell?: false } | { shell: true })) => Promise<{
|
|
2065
|
-
exitCode: number
|
|
2066
|
-
stdout: string
|
|
2067
|
-
stderr: string
|
|
2068
|
-
}>
|
|
2069
|
-
searchFiles: (input: { path: string; regex: string; filePattern?: string }) => Promise<string>
|
|
2070
|
-
listFiles: (input: { path: string; recursive?: boolean; maxCount?: number; includeIgnored?: boolean }) => Promise<string>
|
|
2071
|
-
fetchUrl: (input: { url: string[] }) => Promise<string>
|
|
2072
|
-
askFollowupQuestion: (input: { questions: { prompt: string; options?: string[] }[] }) => Promise<any>
|
|
2073
|
-
// ... and other tools available in the environment
|
|
2074
|
-
}
|
|
2075
|
-
|
|
2076
|
-
// Tools available on ctx.tools in dynamic steps
|
|
2077
|
-
type DynamicWorkflowTools = {
|
|
2078
|
-
// LLM + agent helpers
|
|
2079
|
-
runAgent: (input: {
|
|
2080
|
-
tools: Readonly<FullToolInfo[]>
|
|
2081
|
-
maxToolRoundTrips?: number
|
|
2082
|
-
userMessage: readonly JsonModelMessage[]
|
|
2083
|
-
} & ({ messages: JsonModelMessage[] } | { systemPrompt: string })) => Promise<ExitReason>
|
|
2084
|
-
|
|
2085
|
-
// CLI UX helpers
|
|
2086
|
-
confirm: (input: { message: string }) => Promise<boolean>
|
|
2087
|
-
input: (input: { message: string; default?: string }) => Promise<string>
|
|
2088
|
-
select: (input: { message: string; choices: { name: string; value: string }[] }) => Promise<string>
|
|
2089
|
-
}
|
|
2090
|
-
|
|
2091
|
-
type DynamicStepRuntimeContext = {
|
|
2092
|
-
workflowId: string
|
|
2093
|
-
stepId: string
|
|
2094
|
-
input: Record<string, any>
|
|
2095
|
-
state: Record<string, any>
|
|
2096
|
-
tools: DynamicWorkflowTools
|
|
2097
|
-
agentTools: AgentTools
|
|
2098
|
-
logger: Logger
|
|
2099
|
-
step: StepFn
|
|
2100
|
-
runWorkflow: (workflowId: string, input?: Record<string, any>) => Promise<any>
|
|
2101
|
-
toolInfo?: ReadonlyArray<FullToolInfo>
|
|
2102
|
-
}
|
|
2103
|
-
\`\`\`
|
|
2104
|
-
|
|
2105
|
-
- \`ctx.input\`: workflow inputs (read-only).
|
|
2106
|
-
- \`ctx.state\`: shared state between steps (previous step outputs are stored here).
|
|
2107
|
-
- \`ctx.agentTools\`: standard tools (readFile, executeCommand, etc.). Call as \`await ctx.agentTools.someTool({ ... })\`.
|
|
2108
|
-
- \`ctx.tools\`: workflow helpers (runAgent, confirm, input, select).
|
|
2109
|
-
- \`ctx.runWorkflow\`: run a sub-workflow by id.`;
|
|
2110
|
-
var CONTEXT_USAGE_GUIDELINES = `## Guidelines
|
|
2111
|
-
- Use \`await\` for all async operations.
|
|
2112
|
-
- Return the output value for the step (this becomes the step output).
|
|
2113
|
-
- Access inputs via \`ctx.input.<inputId>\`.
|
|
2114
|
-
- Access previous step outputs via \`ctx.state.<stepOutputKey>\` (defaults to the step \`output\` or \`id\`).`;
|
|
2115
|
-
var QUALITY_GUIDELINES = `## Quality Guidelines for Code Implementation
|
|
2116
|
-
|
|
2117
|
-
### Error Handling
|
|
2118
|
-
- ALWAYS validate inputs at the start of steps
|
|
2119
|
-
- Use try-catch for operations that might fail (file I/O, parsing, API calls)
|
|
2120
|
-
- Preserve stack traces: re-throw original errors rather than creating new ones
|
|
2121
|
-
- Use error type guards: \`const err = error instanceof Error ? error : new Error(String(error))\`
|
|
2122
|
-
- Check for null/undefined before using values
|
|
2123
|
-
- Handle edge cases (empty arrays, missing files, invalid data)
|
|
2124
|
-
|
|
2125
|
-
### Logging
|
|
2126
|
-
- Use \`ctx.logger.info()\` for important progress updates
|
|
2127
|
-
- Use \`ctx.logger.debug()\` for detailed information
|
|
2128
|
-
- Use \`ctx.logger.warn()\` for recoverable issues
|
|
2129
|
-
- Use \`ctx.logger.error()\` before throwing errors
|
|
2130
|
-
- Log when starting and completing significant operations
|
|
2131
|
-
- Use template literals for readability: \`ctx.logger.info(\\\`Processing \${items.length} items...\\\`)\`
|
|
2132
|
-
|
|
2133
|
-
### User Experience
|
|
2134
|
-
- Provide progress feedback for long operations
|
|
2135
|
-
- Return structured data (objects/arrays), not strings when possible
|
|
2136
|
-
- Include helpful metadata in results (counts, timestamps, status)
|
|
2137
|
-
- For batch operations, report progress: \`Processed 5/10 items\`
|
|
2138
|
-
|
|
2139
|
-
### Data Validation
|
|
2140
|
-
- Validate required fields exist before accessing
|
|
2141
|
-
- Check data types match expectations
|
|
2142
|
-
- Validate array lengths before iteration
|
|
2143
|
-
- Example: \`if (!data?.users || !Array.isArray(data.users)) throw new Error('Invalid data format')\`
|
|
2144
|
-
|
|
2145
|
-
### Best Practices
|
|
2146
|
-
- Use meaningful variable names
|
|
2147
|
-
- Avoid nested callbacks - use async/await
|
|
2148
|
-
- Clean up resources (close files, clear timeouts)
|
|
2149
|
-
- Return consistent data structures across similar steps
|
|
2150
|
-
- For iteration, consider batching or rate limiting
|
|
2151
|
-
|
|
2152
|
-
### When to Simplify
|
|
2153
|
-
- Simple transformation steps (e.g., formatting strings) need only basic error handling
|
|
2154
|
-
- Internal sub-workflow steps with validated inputs from parent can skip redundant validation
|
|
2155
|
-
- Minimal logging is fine for fast steps (<100ms) that don't perform I/O or external calls
|
|
2156
|
-
- Use judgment: match error handling complexity to the step's failure risk and impact`;
|
|
2157
|
-
var TOOL_CALLING_EXAMPLES = `## Tool calling examples
|
|
2158
|
-
|
|
2159
|
-
### Standard tools (ctx.agentTools)
|
|
2160
|
-
\`\`\`ts
|
|
2161
|
-
// readFile
|
|
2162
|
-
const readme = await ctx.agentTools.readFile({ path: 'README.md' })
|
|
2163
|
-
if (readme == null) throw new Error('README.md not found')
|
|
2164
|
-
|
|
2165
|
-
// writeToFile
|
|
2166
|
-
await ctx.agentTools.writeToFile({ path: 'notes.txt', content: 'hello\\n' })
|
|
2167
|
-
|
|
2168
|
-
// executeCommand (args form)
|
|
2169
|
-
const rg = await ctx.agentTools.executeCommand({ command: 'rg', args: ['-n', 'TODO', '.'] })
|
|
2170
|
-
if (rg.exitCode !== 0) throw new Error(rg.stderr)
|
|
2171
|
-
|
|
2172
|
-
// executeCommand (shell form)
|
|
2173
|
-
await ctx.agentTools.executeCommand({ command: 'ls -la', shell: true, pipe: true })
|
|
2174
|
-
\`\`\`
|
|
2175
|
-
|
|
2176
|
-
### Workflow helpers (ctx.tools)
|
|
2177
|
-
\`\`\`ts
|
|
2178
|
-
// runAgent (nested agent; use ctx.toolInfo as the tool list)
|
|
2179
|
-
const agentRes = await ctx.tools.runAgent({
|
|
2180
|
-
systemPrompt: 'You are a helpful assistant.',
|
|
2181
|
-
userMessage: [{ role: 'user', content: 'Summarize README.md in 3 bullets.' }],
|
|
2182
|
-
tools: (ctx.toolInfo ?? []) as any,
|
|
2183
|
-
})
|
|
2184
|
-
if (agentRes.type !== 'Exit') throw new Error('runAgent failed')
|
|
2185
|
-
|
|
2186
|
-
// confirm / input / select (interactive)
|
|
2187
|
-
const ok = await ctx.tools.confirm({ message: 'Proceed?' })
|
|
2188
|
-
const name = await ctx.tools.input({ message: 'Name?', default: 'main' })
|
|
2189
|
-
const flavor = await ctx.tools.select({
|
|
2190
|
-
message: 'Pick one',
|
|
2191
|
-
choices: [
|
|
2192
|
-
{ name: 'A', value: 'a' },
|
|
2193
|
-
{ name: 'B', value: 'b' },
|
|
2194
|
-
],
|
|
2195
|
-
})
|
|
2196
|
-
\`\`\`
|
|
2197
|
-
|
|
2198
|
-
### Sub-workflow example (ctx.runWorkflow)
|
|
2199
|
-
\`\`\`ts
|
|
2200
|
-
const results: any[] = []
|
|
2201
|
-
for (const pr of ctx.state.prs ?? []) {
|
|
2202
|
-
results.push(await ctx.runWorkflow('reviewPR', { prId: pr.id }))
|
|
2203
|
-
}
|
|
2204
|
-
return results
|
|
2205
|
-
\`\`\``;
|
|
2206
|
-
var COMPLETE_STEP_EXAMPLE = `## Complete Example: High-Quality Step Implementation
|
|
2207
|
-
|
|
2208
|
-
This example demonstrates all quality guidelines in a single step:
|
|
2209
|
-
|
|
2210
|
-
\`\`\`ts
|
|
2211
|
-
// Step: processUserData
|
|
2212
|
-
// Task: Read, validate, and process user data from a file
|
|
2213
|
-
|
|
2214
|
-
// Input validation
|
|
2215
|
-
if (!ctx.input.dataFile) {
|
|
2216
|
-
throw new Error('Missing required input: dataFile')
|
|
2217
|
-
}
|
|
2218
|
-
|
|
2219
|
-
ctx.logger.info(\`Starting user data processing for: \${ctx.input.dataFile}\`)
|
|
2220
|
-
|
|
2221
|
-
// Read file with error handling
|
|
2222
|
-
let rawData
|
|
2223
|
-
try {
|
|
2224
|
-
ctx.logger.debug(\`Reading file: \${ctx.input.dataFile}\`)
|
|
2225
|
-
rawData = await ctx.agentTools.readFile({ path: ctx.input.dataFile })
|
|
2226
|
-
|
|
2227
|
-
if (!rawData) {
|
|
2228
|
-
throw new Error(\`File not found or empty: \${ctx.input.dataFile}\`)
|
|
2229
|
-
}
|
|
2230
|
-
} catch (error) {
|
|
2231
|
-
const err = error instanceof Error ? error : new Error(String(error))
|
|
2232
|
-
ctx.logger.error(\`Failed to read file: \${err.message}\`)
|
|
2233
|
-
throw err // Preserve original stack trace
|
|
2234
|
-
}
|
|
2235
|
-
|
|
2236
|
-
// Parse and validate data
|
|
2237
|
-
let users
|
|
2238
|
-
try {
|
|
2239
|
-
ctx.logger.debug('Parsing JSON data')
|
|
2240
|
-
const parsed = JSON.parse(rawData)
|
|
2241
|
-
|
|
2242
|
-
if (!parsed?.users || !Array.isArray(parsed.users)) {
|
|
2243
|
-
throw new Error('Invalid data format: expected {users: [...]}')
|
|
2244
|
-
}
|
|
2245
|
-
|
|
2246
|
-
users = parsed.users
|
|
2247
|
-
ctx.logger.info(\`Found \${users.length} users to process\`)
|
|
2248
|
-
} catch (error) {
|
|
2249
|
-
const err = error instanceof Error ? error : new Error(String(error))
|
|
2250
|
-
ctx.logger.error(\`Data parsing failed: \${err.message}\`)
|
|
2251
|
-
throw err // Preserve original stack trace
|
|
2252
|
-
}
|
|
2253
|
-
|
|
2254
|
-
// Process each user with progress reporting
|
|
2255
|
-
const results = []
|
|
2256
|
-
for (let i = 0; i < users.length; i++) {
|
|
2257
|
-
const user = users[i]
|
|
2258
|
-
|
|
2259
|
-
// Validate each user object
|
|
2260
|
-
if (!user?.id || !user?.email) {
|
|
2261
|
-
ctx.logger.warn(\`Skipping invalid user at index \${i}: missing id or email\`)
|
|
2262
|
-
continue
|
|
2263
|
-
}
|
|
2264
|
-
|
|
2265
|
-
// Process user
|
|
2266
|
-
const processed = {
|
|
2267
|
-
id: user.id,
|
|
2268
|
-
email: user.email.toLowerCase().trim(),
|
|
2269
|
-
name: user.name?.trim() || 'Unknown',
|
|
2270
|
-
processedAt: new Date().toISOString(),
|
|
2271
|
-
status: 'active'
|
|
2272
|
-
}
|
|
2273
|
-
|
|
2274
|
-
results.push(processed)
|
|
2275
|
-
|
|
2276
|
-
// Progress feedback every 10 items
|
|
2277
|
-
if ((i + 1) % 10 === 0) {
|
|
2278
|
-
ctx.logger.info(\`Processed \${i + 1}/\${users.length} users\`)
|
|
2279
|
-
}
|
|
2280
|
-
}
|
|
2281
|
-
|
|
2282
|
-
ctx.logger.info(\`Successfully processed \${results.length}/\${users.length} users\`)
|
|
2283
|
-
|
|
2284
|
-
// Return structured result with metadata
|
|
2285
|
-
return {
|
|
2286
|
-
users: results,
|
|
2287
|
-
metadata: {
|
|
2288
|
-
totalInput: users.length,
|
|
2289
|
-
totalProcessed: results.length,
|
|
2290
|
-
skipped: users.length - results.length,
|
|
2291
|
-
processedAt: new Date().toISOString()
|
|
2292
|
-
}
|
|
2293
|
-
}
|
|
2294
|
-
\`\`\`
|
|
2295
|
-
|
|
2296
|
-
Key features demonstrated:
|
|
2297
|
-
- Input validation at start
|
|
2298
|
-
- Comprehensive error handling with try-catch that preserves stack traces
|
|
2299
|
-
- Logging at info, debug, warn, and error levels
|
|
2300
|
-
- Progress reporting for long operations (every 10 items)
|
|
2301
|
-
- Data validation throughout (null checks, type checks, array validation)
|
|
2302
|
-
- Structured return value with metadata for observability
|
|
2303
|
-
- Descriptive error messages with context
|
|
2304
|
-
- Meaningful variable names (rawData, users, processed)
|
|
2305
|
-
- Clean async/await usage
|
|
2306
|
-
- Template literals for readable string interpolation
|
|
2307
|
-
- Proper error type guards (error instanceof Error)`;
|
|
2308
|
-
var CODE_FIELD_CONSTRAINTS = `REMEMBER: The "code" field must be ONLY the function body statements.
|
|
2309
|
-
- DO NOT wrap code in arrow functions: \`(ctx) => { ... }\`
|
|
2310
|
-
- DO NOT wrap code in async functions: \`async (ctx) => { ... }\`
|
|
2311
|
-
- DO NOT include outer curly braces
|
|
2312
|
-
- DO include a return statement if the step should produce output
|
|
2313
|
-
- Each "code" field should be a string containing multiple statements separated by newlines`;
|
|
2314
|
-
function composeImplementationGuidelines() {
|
|
2315
|
-
return [
|
|
2316
|
-
RUNTIME_CONTEXT_TYPES,
|
|
2317
|
-
"",
|
|
2318
|
-
CONTEXT_USAGE_GUIDELINES,
|
|
2319
|
-
"",
|
|
2320
|
-
QUALITY_GUIDELINES,
|
|
2321
|
-
"",
|
|
2322
|
-
TOOL_CALLING_EXAMPLES,
|
|
2323
|
-
"",
|
|
2324
|
-
COMPLETE_STEP_EXAMPLE
|
|
2325
|
-
].join("\n");
|
|
2326
|
-
}
|
|
2327
|
-
|
|
2328
|
-
// src/workflow/dynamic-generator.workflow.ts
|
|
2329
|
-
var GenerateWorkflowDefinitionInputSchema = z18.object({
|
|
2330
|
-
prompt: z18.string(),
|
|
2331
|
-
availableTools: z18.array(
|
|
2332
|
-
z18.object({
|
|
2333
|
-
name: z18.string(),
|
|
2334
|
-
description: z18.string()
|
|
2335
|
-
})
|
|
2336
|
-
).nullish()
|
|
2337
|
-
});
|
|
2338
|
-
var GenerateWorkflowCodeInputSchema = z18.object({
|
|
2339
|
-
workflow: WorkflowFileSchema,
|
|
2340
|
-
skipReview: z18.boolean().nullish()
|
|
2341
|
-
});
|
|
2342
|
-
var AsyncFunction2 = Object.getPrototypeOf(async () => {
|
|
2343
|
-
}).constructor;
|
|
2344
|
-
function validateWorkflowDefinition(workflow) {
|
|
2345
|
-
const errors = [];
|
|
2346
|
-
if (!workflow.workflows.main) {
|
|
2347
|
-
errors.push("Missing required 'main' workflow");
|
|
2348
|
-
}
|
|
2349
|
-
for (const [wfId, wf] of Object.entries(workflow.workflows)) {
|
|
2350
|
-
const stepIds = /* @__PURE__ */ new Set();
|
|
2351
|
-
for (const step of wf.steps) {
|
|
2352
|
-
if (stepIds.has(step.id)) {
|
|
2353
|
-
errors.push(`Duplicate step ID '${step.id}' in workflow '${wfId}'`);
|
|
2354
|
-
}
|
|
2355
|
-
stepIds.add(step.id);
|
|
2356
|
-
}
|
|
2357
|
-
}
|
|
2358
|
-
return { valid: errors.length === 0, errors };
|
|
2359
|
-
}
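Note: a usage sketch for the validator above, with an illustrative definition that trips both checks (no `main` workflow, duplicated step IDs):

```ts
const bad = {
  workflows: {
    helper: {
      task: 'Demo workflow with problems',
      steps: [
        { id: 'dup', task: 'first step' },
        { id: 'dup', task: 'second step' },
      ],
    },
  },
}

const { valid, errors } = validateWorkflowDefinition(bad)
// valid === false
// errors[0]: "Missing required 'main' workflow"
// errors[1]: "Duplicate step ID 'dup' in workflow 'helper'"
```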
|
|
2360
|
-
function validateWorkflowCodeSyntax(workflow) {
|
|
2361
|
-
const errors = [];
|
|
2362
|
-
for (const [wfId, wf] of Object.entries(workflow.workflows)) {
|
|
2363
|
-
for (const step of wf.steps) {
|
|
2364
|
-
if (step.code) {
|
|
2365
|
-
try {
|
|
2366
|
-
new AsyncFunction2("ctx", step.code);
|
|
2367
|
-
} catch (e) {
|
|
2368
|
-
const errorMsg = e instanceof Error ? e.message : String(e);
|
|
2369
|
-
errors.push(`Syntax error in ${wfId}.${step.id}: ${errorMsg}`);
|
|
2370
|
-
}
|
|
2371
|
-
}
|
|
2372
|
-
}
|
|
2373
|
-
}
|
|
2374
|
-
return { valid: errors.length === 0, errors };
|
|
2375
|
-
}
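Note: this check compiles each step body with the `AsyncFunction2` constructor defined above, so malformed bodies fail before anything runs. An illustrative input:

```ts
const draft = {
  workflows: {
    main: {
      task: 'Syntax check demo',
      steps: [
        { id: 'good', task: 'valid body', code: 'return 1 + 1' },
        { id: 'bad', task: 'broken body', code: 'return 1 +' },
      ],
    },
  },
}

const { valid, errors } = validateWorkflowCodeSyntax(draft)
// valid === false
// errors[0] begins with "Syntax error in main.bad: "
```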
|
|
2376
|
-
var WORKFLOW_DEFINITION_SYSTEM_PROMPT = `You are an expert workflow architect.
|
|
2377
|
-
Your task is to create a JSON workflow definition based on the user's request.
|
|
2378
|
-
|
|
2379
|
-
The workflow definition must follow this structure:
|
|
2380
|
-
{
|
|
2381
|
-
"workflows": {
|
|
2382
|
-
"workflowName": {
|
|
2383
|
-
"task": "Description of the workflow",
|
|
2384
|
-
"inputs": [
|
|
2385
|
-
{ "id": "inputName", "description": "Description", "default": "optionalDefault" }
|
|
2386
|
-
],
|
|
2387
|
-
"steps": [
|
|
2388
|
-
{
|
|
2389
|
-
"id": "stepId",
|
|
2390
|
-
"task": "Description of the step",
|
|
2391
|
-
"tools": ["toolName1", "toolName2"], // Optional: restrict which tools can be used. Can use groups: "readonly", "readwrite", "internet", "all".
|
|
2392
|
-
"output": "outputVariableName", // Optional: defaults to step id
|
|
2393
|
-
"timeout": 30000, // Optional: timeout in milliseconds
|
|
2394
|
-
"expected_outcome": "What this step produces", // Optional: documentation for expected results
|
|
2395
|
-
"outputSchema": { "type": "object" } // Optional: validation schema
|
|
2396
|
-
}
|
|
2397
|
-
],
|
|
2398
|
-
"output": "outputVariableName" // Optional
|
|
2399
|
-
}
|
|
2400
|
-
}
|
|
2401
|
-
}
|
|
2402
|
-
|
|
2403
|
-
Constraints:
|
|
2404
|
-
- You MUST always include a workflow named 'main'. This is the entry point.
|
|
2405
|
-
- The 'main' workflow must accept either no input or a single string input.
|
|
2406
|
-
- Break down complex tasks into logical steps.
|
|
2407
|
-
- Define clear inputs and outputs.
|
|
2408
|
-
|
|
2409
|
-
Quality Guidelines:
|
|
2410
|
-
- Add "timeout" field (in milliseconds) for steps that might take long (file I/O, API calls, searches)
|
|
2411
|
-
- Use "expected_outcome" field to document what each step should produce and its format
|
|
2412
|
-
- Use descriptive step IDs (e.g., "validateInput", "fetchUserData", not "step1", "step2")
|
|
2413
|
-
- Design steps to be focused: one responsibility per step
|
|
2414
|
-
- For steps that process multiple items, consider creating a sub-workflow
|
|
2415
|
-
- Add "outputSchema" with type information for validation-critical steps
|
|
2416
|
-
- Order steps logically with clear data flow
|
|
2417
|
-
|
|
2418
|
-
### Using expected_outcome Effectively
|
|
2419
|
-
|
|
2420
|
-
The "expected_outcome" field helps document what each step produces. Best practices:
|
|
2421
|
-
- Describe the data structure: "Returns an array of { id, name, status } objects"
|
|
2422
|
-
- Mention important constraints: "Returns at most 10 results, sorted by date"
|
|
2423
|
-
- Note failure modes: "Returns null if file not found"
|
|
2424
|
-
- Document side effects: "Creates output directory if it doesn't exist"
|
|
2425
|
-
|
|
2426
|
-
Example 1 - Research workflow:
|
|
2427
|
-
User: "Research a topic and summarize it."
|
|
2428
|
-
Output:
|
|
2429
|
-
\`\`\`json
|
|
2430
|
-
{
|
|
2431
|
-
"workflows": {
|
|
2432
|
-
"main": {
|
|
2433
|
-
"task": "Research a topic and provide a summary",
|
|
2434
|
-
"inputs": [
|
|
2435
|
-
{ "id": "topic", "description": "The topic to research" }
|
|
2436
|
-
],
|
|
2437
|
-
"steps": [
|
|
2438
|
-
{
|
|
2439
|
-
"id": "search",
|
|
2440
|
-
"task": "Search for information about the topic",
|
|
2441
|
-
"tools": ["search"],
|
|
2442
|
-
"output": "searchResults",
|
|
2443
|
-
"timeout": 30000,
|
|
2444
|
-
"expected_outcome": "Returns search results with titles, URLs, and snippets related to the topic"
|
|
2445
|
-
},
|
|
2446
|
-
{
|
|
2447
|
-
"id": "summarize",
|
|
2448
|
-
"task": "Summarize the search results",
|
|
2449
|
-
"tools": ["runAgent"],
|
|
2450
|
-
"output": "summary",
|
|
2451
|
-
"expected_outcome": "Returns a concise summary string (2-3 paragraphs) of the key findings"
|
|
2452
|
-
}
|
|
2453
|
-
],
|
|
2454
|
-
"output": "summary"
|
|
2455
|
-
}
|
|
2456
|
-
}
|
|
2457
|
-
}
|
|
2458
|
-
\`\`\`
|
|
2459
|
-
|
|
2460
|
-
Example 2 - PR review workflow with sub-workflow:
|
|
2461
|
-
User: "Review urgent PRs. For each PR, run the review workflow."
|
|
2462
|
-
Output:
|
|
2463
|
-
\`\`\`json
|
|
2464
|
-
{
|
|
2465
|
-
"workflows": {
|
|
2466
|
-
"main": {
|
|
2467
|
-
"task": "Fetch urgent PRs and review them",
|
|
2468
|
-
"inputs": [],
|
|
2469
|
-
"steps": [
|
|
2470
|
-
{
|
|
2471
|
-
"id": "fetchPRs",
|
|
2472
|
-
"task": "Fetch list of urgent PRs",
|
|
2473
|
-
"tools": ["github_list_prs"],
|
|
2474
|
-
"output": "prs",
|
|
2475
|
-
"timeout": 15000,
|
|
2476
|
-
"expected_outcome": "Returns array of PR objects with { id, title, author, url }"
|
|
2477
|
-
},
|
|
2478
|
-
{
|
|
2479
|
-
"id": "reviewEachPR",
|
|
2480
|
-
"task": "Run review workflow for each PR",
|
|
2481
|
-
"tools": [],
|
|
2482
|
-
"output": "reviews",
|
|
2483
|
-
"expected_outcome": "Returns array of review results, one per PR"
|
|
2484
|
-
}
|
|
2485
|
-
],
|
|
2486
|
-
"output": "reviews"
|
|
2487
|
-
},
|
|
2488
|
-
"reviewPR": {
|
|
2489
|
-
"task": "Review a single PR",
|
|
2490
|
-
"inputs": [
|
|
2491
|
-
{ "id": "prId", "description": "ID of the PR to review" }
|
|
2492
|
-
],
|
|
2493
|
-
"steps": [
|
|
2494
|
-
{
|
|
2495
|
-
"id": "getDiff",
|
|
2496
|
-
"task": "Get PR diff",
|
|
2497
|
-
"tools": ["github_get_diff"],
|
|
2498
|
-
"output": "diff",
|
|
2499
|
-
"timeout": 10000,
|
|
2500
|
-
"expected_outcome": "Returns the unified diff string for the PR"
|
|
2501
|
-
},
|
|
2502
|
-
{
|
|
2503
|
-
"id": "analyze",
|
|
2504
|
-
"task": "Analyze the diff and provide feedback",
|
|
2505
|
-
"tools": ["runAgent"],
|
|
2506
|
-
"output": "analysis",
|
|
2507
|
-
"expected_outcome": "Returns { summary, issues, suggestions } object with review feedback"
|
|
2508
|
-
}
|
|
2509
|
-
],
|
|
2510
|
-
"output": "analysis"
|
|
2511
|
-
}
|
|
2512
|
-
}
|
|
2513
|
-
}
|
|
2514
|
-
\`\`\`
|
|
2515
|
-
|
|
2516
|
-
Example 3 - File processing with conditional logic:
|
|
2517
|
-
User: "Process all JSON files in a directory, validate them, and generate a report."
|
|
2518
|
-
Output:
|
|
2519
|
-
\`\`\`json
|
|
2520
|
-
{
|
|
2521
|
-
"workflows": {
|
|
2522
|
-
"main": {
|
|
2523
|
-
"task": "Process and validate JSON files, then generate a report",
|
|
2524
|
-
"inputs": [
|
|
2525
|
-
{ "id": "directory", "description": "Directory containing JSON files", "default": "data" }
|
|
2526
|
-
],
|
|
2527
|
-
"steps": [
|
|
2528
|
-
{
|
|
2529
|
-
"id": "listJsonFiles",
|
|
2530
|
-
"task": "List all JSON files in the directory",
|
|
2531
|
-
"tools": ["listFiles"],
|
|
2532
|
-
"output": "jsonFiles",
|
|
2533
|
-
"timeout": 5000,
|
|
2534
|
-
"expected_outcome": "Returns array of file paths ending in .json"
|
|
2535
|
-
},
|
|
2536
|
-
{
|
|
2537
|
-
"id": "processFiles",
|
|
2538
|
-
"task": "Process each JSON file using the processFile sub-workflow",
|
|
2539
|
-
"tools": [],
|
|
2540
|
-
"output": "processedResults",
|
|
2541
|
-
"expected_outcome": "Returns array of { file, valid, errors?, data? } for each file"
|
|
2542
|
-
},
|
|
2543
|
-
{
|
|
2544
|
-
"id": "generateReport",
|
|
2545
|
-
"task": "Generate a summary report of all processed files",
|
|
2546
|
-
"tools": ["writeToFile"],
|
|
2547
|
-
"output": "reportPath",
|
|
2548
|
-
"expected_outcome": "Writes report to 'report.md' and returns the file path"
|
|
2549
|
-
}
|
|
2550
|
-
],
|
|
2551
|
-
"output": "reportPath"
|
|
2552
|
-
},
|
|
2553
|
-
"processFile": {
|
|
2554
|
-
"task": "Process and validate a single JSON file",
|
|
2555
|
-
"inputs": [
|
|
2556
|
-
{ "id": "filePath", "description": "Path to the JSON file" }
|
|
2557
|
-
],
|
|
2558
|
-
"steps": [
|
|
2559
|
-
{
|
|
2560
|
-
"id": "readFile",
|
|
2561
|
-
"task": "Read the JSON file content",
|
|
2562
|
-
"tools": ["readFile"],
|
|
2563
|
-
"output": "content",
|
|
2564
|
-
"timeout": 3000,
|
|
2565
|
-
"expected_outcome": "Returns file content as string, or null if not found"
|
|
2566
|
-
},
|
|
2567
|
-
{
|
|
2568
|
-
"id": "validateJson",
|
|
2569
|
-
"task": "Parse and validate the JSON structure",
|
|
2570
|
-
"tools": [],
|
|
2571
|
-
"output": "validationResult",
|
|
2572
|
-
"expected_outcome": "Returns { valid: boolean, data?: object, errors?: string[] }"
|
|
2573
|
-
}
|
|
2574
|
-
],
|
|
2575
|
-
"output": "validationResult"
|
|
2576
|
-
}
|
|
2577
|
-
}
|
|
2578
|
-
}
|
|
2579
|
-
\`\`\`
|
|
2580
|
-
`;
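Note: a minimal definition following the shape this prompt documents; the values are illustrative, but the object passes the `validateWorkflowDefinition` check shown earlier:

```ts
const definition = {
  workflows: {
    main: {
      task: 'Echo the input back',
      inputs: [{ id: 'message', description: 'Text to echo' }],
      steps: [
        {
          id: 'echo',
          task: 'Return the input unchanged',
          tools: [],
          output: 'result',
          expected_outcome: 'Returns the input string as-is',
        },
      ],
      output: 'result',
    },
  },
}

const check = validateWorkflowDefinition(definition)
// check.valid === true
```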
|
|
2581
|
-
var WORKFLOW_IMPLEMENTATION_GUIDELINES = composeImplementationGuidelines();
|
|
2582
|
-
var WORKFLOW_CODE_SYSTEM_PROMPT = `You are an expert TypeScript developer.
|
|
2583
|
-
Your task is to implement the TypeScript code for the steps in the provided workflow definition.
|
|
2584
|
-
|
|
2585
|
-
You will receive a JSON workflow definition where the "code" field is null.
|
|
2586
|
-
You must fill in the "code" field for each step with valid TypeScript code.
|
|
2587
|
-
|
|
2588
|
-
CRITICAL: Each step "code" field must contain ONLY the function body statements (the code inside the curly braces).
|
|
2589
|
-
DO NOT include a function declaration, arrow function syntax, the async keyword, a parameter list, or outer curly braces.
|
|
2590
|
-
|
|
2591
|
-
Prefer using \`ctx.tools.runAgent\` for complex tasks or when multiple steps/tools are needed. Use \`ctx.agentTools\` for direct tool usage (e.g. \`ctx.agentTools.readFile\`).
|
|
2592
|
-
|
|
2593
|
-
The code will be wrapped automatically in: \`async (ctx) => { YOUR_CODE_HERE }\`
|
|
2594
|
-
|
|
2595
|
-
Example of CORRECT code field:
|
|
2596
|
-
\`\`\`ts
|
|
2597
|
-
const result = await ctx.agentTools.readFile({ path: 'README.md' })
|
|
2598
|
-
if (!result) throw new Error('File not found')
|
|
2599
|
-
return result
|
|
2600
|
-
\`\`\`
|
|
2601
|
-
|
|
2602
|
-
Example of INCORRECT code field (DO NOT DO THIS):
|
|
2603
|
-
\`\`\`ts
|
|
2604
|
-
async (ctx) => {
|
|
2605
|
-
const result = await ctx.agentTools.readFile({ path: 'README.md' })
|
|
2606
|
-
return result
|
|
2607
|
-
}
|
|
2608
|
-
\`\`\`
|
|
2609
|
-
|
|
2610
|
-
${WORKFLOW_IMPLEMENTATION_GUIDELINES}
|
|
2611
|
-
|
|
2612
|
-
## Final Instructions
|
|
2613
|
-
|
|
2614
|
-
${CODE_FIELD_CONSTRAINTS}
|
|
2615
|
-
|
|
2616
|
-
Return the complete workflow JSON with the "code" fields populated.
|
|
2617
|
-
`;
|
|
2618
|
-
var WORKFLOW_REVIEW_SYSTEM_PROMPT = `You are an expert TypeScript Code Reviewer.
|
|
2619
|
-
Your task is to review the provided workflow definition and its implemented code, and improve it to meet the highest quality standards.
|
|
2620
|
-
|
|
2621
|
-
You will receive a JSON workflow definition where the "code" fields are already populated.
|
|
2622
|
-
You must review each step's code and improve it if necessary.
|
|
2623
|
-
|
|
2624
|
-
Check for:
|
|
2625
|
-
- Correct usage of \`ctx.agentTools\` (for standard tools) and \`ctx.tools\` (for workflow helpers).
|
|
2626
|
-
- Proper error handling (try-catch, input validation).
|
|
2627
|
-
- Meaningful logging.
|
|
2628
|
-
- Adherence to the Quality Guidelines.
|
|
2629
|
-
- Correct syntax (no outer function wrappers).
|
|
2630
|
-
|
|
2631
|
-
${QUALITY_GUIDELINES}
|
|
2632
|
-
|
|
2633
|
-
## Final Instructions
|
|
2634
|
-
|
|
2635
|
-
Return the complete workflow JSON with the "code" fields improved where necessary.
|
|
2636
|
-
Ensure the "code" field still contains ONLY the function body statements.
|
|
2637
|
-
`;
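Note: a hypothetical before/after of the improvement this review pass asks for. Both values remain body-only statement strings; the `ctx.agentTools.readFile` call shape follows the example used earlier in these prompts:

```ts
// Before review: no validation, no logging, raw errors.
const before = [
  "const data = await ctx.agentTools.readFile({ path: 'data.json' })",
  'return JSON.parse(data)',
].join('\n')

// After review: input validation, error handling, and logging added.
const after = [
  "const data = await ctx.agentTools.readFile({ path: 'data.json' })",
  "if (!data) throw new Error('data.json is missing or empty')",
  'try {',
  '  const parsed = JSON.parse(data)',
  "  ctx.logger.info('Parsed data.json successfully')",
  '  return parsed',
  '} catch (error) {',
  '  const msg = error instanceof Error ? error.message : String(error)',
  '  throw new Error(`Invalid JSON in data.json: ${msg}`)',
  '}',
].join('\n')
```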
|
|
2638
|
-
var MAX_GENERATION_ATTEMPTS = 3;
|
|
2639
|
-
var generateWorkflowDefinitionWorkflow = async (input, ctx) => {
|
|
2640
|
-
let systemPrompt = WORKFLOW_DEFINITION_SYSTEM_PROMPT;
|
|
2641
|
-
if (input.availableTools && input.availableTools.length > 0) {
|
|
2642
|
-
const toolsList = input.availableTools.map((t) => `- ${t.name}: ${t.description}`).join("\n");
|
|
2643
|
-
systemPrompt += `
|
|
2644
|
-
|
|
2645
|
-
Available Tools:
|
|
2646
|
-
${toolsList}
|
|
2647
|
-
|
|
2648
|
-
Use these tools when appropriate.`;
|
|
2649
|
-
}
|
|
2650
|
-
const result = await ctx.step("generate-workflow-definition", async () => {
|
|
2651
|
-
return agentWorkflow(
|
|
2652
|
-
{
|
|
2653
|
-
systemPrompt,
|
|
2654
|
-
userMessage: [{ role: "user", content: input.prompt }],
|
|
2655
|
-
tools: [],
|
|
2656
|
-
outputSchema: WorkflowFileSchema
|
|
2657
|
-
},
|
|
2658
|
-
ctx
|
|
2659
|
-
);
|
|
2660
|
-
});
|
|
2661
|
-
if (result.type !== "Exit" || !result.object) {
|
|
2662
|
-
throw new Error("Failed to generate workflow definition");
|
|
2663
|
-
}
|
|
2664
|
-
const workflow = result.object;
|
|
2665
|
-
await ctx.step("validate-workflow-definition", async () => {
|
|
2666
|
-
const validation = validateWorkflowDefinition(workflow);
|
|
2667
|
-
if (!validation.valid) {
|
|
2668
|
-
ctx.logger.warn(`Workflow definition validation warnings: ${validation.errors.join("; ")}`);
|
|
2669
|
-
}
|
|
2670
|
-
return validation;
|
|
2671
|
-
});
|
|
2672
|
-
return workflow;
|
|
2673
|
-
};
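Note: a hypothetical invocation of the generator above (removed in this release, per the diff). The real `ctx` comes from the workflow runtime, which supplies `step`, `logger`, and the agent plumbing, so it is declared opaquely here; an ESM module is assumed for top-level await:

```ts
declare const ctx: any // provided by the workflow runtime

const definition = await generateWorkflowDefinitionWorkflow(
  {
    prompt: 'Research a topic and summarize it',
    availableTools: [
      { name: 'search', description: 'Search the web for information' },
      { name: 'runAgent', description: 'Delegate a focused sub-task to an agent' },
    ],
  },
  ctx,
)
// `definition` is a workflow file with a 'main' entry point
// whose step "code" fields are still empty.
```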
|
|
2674
|
-
var generateWorkflowCodeWorkflow = async (input, ctx) => {
|
|
2675
|
-
let lastError = null;
|
|
2676
|
-
let currentWorkflow = input.workflow;
|
|
2677
|
-
for (let attempt = 0; attempt < MAX_GENERATION_ATTEMPTS; attempt++) {
|
|
2678
|
-
const stepName = attempt === 0 ? "generate-workflow-code" : `retry-workflow-code-${attempt}`;
|
|
2679
|
-
const userMessage = lastError ? `Previous attempt had issues: ${lastError}
|
|
2680
|
-
|
|
2681
|
-
Please fix the problems in this workflow:
|
|
2682
|
-
${JSON.stringify(currentWorkflow, null, 2)}` : JSON.stringify(currentWorkflow, null, 2);
|
|
2683
|
-
const generated = await ctx.step(stepName, async () => {
|
|
2684
|
-
return agentWorkflow(
|
|
2685
|
-
{
|
|
2686
|
-
systemPrompt: WORKFLOW_CODE_SYSTEM_PROMPT,
|
|
2687
|
-
userMessage: [{ role: "user", content: userMessage }],
|
|
2688
|
-
tools: [],
|
|
2689
|
-
outputSchema: WorkflowFileSchema
|
|
2690
|
-
},
|
|
2691
|
-
ctx
|
|
2692
|
-
);
|
|
2693
|
-
});
|
|
2694
|
-
if (generated.type !== "Exit" || !generated.object) {
|
|
2695
|
-
lastError = "Failed to generate workflow code";
|
|
2696
|
-
continue;
|
|
2697
|
-
}
|
|
2698
|
-
const generatedWorkflow = generated.object;
|
|
2699
|
-
const syntaxValidation = await ctx.step(`validate-code-syntax-${attempt}`, async () => {
|
|
2700
|
-
return validateWorkflowCodeSyntax(generatedWorkflow);
|
|
2701
|
-
});
|
|
2702
|
-
if (!syntaxValidation.valid) {
|
|
2703
|
-
lastError = syntaxValidation.errors.join("; ");
|
|
2704
|
-
currentWorkflow = generatedWorkflow;
|
|
2705
|
-
ctx.logger.warn(`Code syntax validation failed (attempt ${attempt + 1}): ${lastError}`);
|
|
2706
|
-
continue;
|
|
2707
|
-
}
|
|
2708
|
-
if (input.skipReview) {
|
|
2709
|
-
return generatedWorkflow;
|
|
2710
|
-
}
|
|
2711
|
-
const reviewed = await ctx.step("review-workflow-code", async () => {
|
|
2712
|
-
return agentWorkflow(
|
|
2713
|
-
{
|
|
2714
|
-
systemPrompt: WORKFLOW_REVIEW_SYSTEM_PROMPT,
|
|
2715
|
-
userMessage: [{ role: "user", content: JSON.stringify(generatedWorkflow, null, 2) }],
|
|
2716
|
-
tools: [],
|
|
2717
|
-
outputSchema: WorkflowFileSchema
|
|
2718
|
-
},
|
|
2719
|
-
ctx
|
|
2720
|
-
);
|
|
2721
|
-
});
|
|
2722
|
-
if (reviewed.type !== "Exit" || !reviewed.object) {
|
|
2723
|
-
throw new Error("Failed to review workflow code");
|
|
2724
|
-
}
|
|
2725
|
-
const reviewedWorkflow = reviewed.object;
|
|
2726
|
-
const reviewSyntaxValidation = await ctx.step("validate-reviewed-code-syntax", async () => {
|
|
2727
|
-
return validateWorkflowCodeSyntax(reviewedWorkflow);
|
|
2728
|
-
});
|
|
2729
|
-
if (!reviewSyntaxValidation.valid) {
|
|
2730
|
-
ctx.logger.warn(`Reviewed code has syntax issues: ${reviewSyntaxValidation.errors.join("; ")}`);
|
|
2731
|
-
return generatedWorkflow;
|
|
2732
|
-
}
|
|
2733
|
-
return reviewedWorkflow;
|
|
2734
|
-
}
|
|
2735
|
-
throw new Error(`Failed to generate valid workflow code after ${MAX_GENERATION_ATTEMPTS} attempts: ${lastError}`);
|
|
2736
|
-
};
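Note: a hypothetical end-to-end sketch of the two-phase pipeline shown above: definition first, then implementation, with up to MAX_GENERATION_ATTEMPTS syntax-validated retries and an optional review pass:

```ts
declare const ctx: any // provided by the workflow runtime

const definition = await generateWorkflowDefinitionWorkflow(
  { prompt: 'Process all JSON files in a directory and write a report' },
  ctx,
)

const implemented = await generateWorkflowCodeWorkflow(
  { workflow: definition, skipReview: false },
  ctx,
)
// Same structure as `definition`, but with every step's "code"
// field populated and syntax-checked.
```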
|
|
2737
|
-
|
|
2738
3231
|
// src/workflow/json-ai-types.ts
|
|
2739
3232
|
var toJsonDataContent = (data) => {
|
|
2740
3233
|
if (data instanceof URL) {
|
|
@@ -2933,16 +3426,35 @@ var makeStepFn = () => {
|
|
|
2933
3426
|
};
|
|
2934
3427
|
};
|
|
2935
3428
|
export {
|
|
2936
|
-
|
|
2937
|
-
|
|
3429
|
+
BreakStepSchema,
|
|
3430
|
+
ContinueStepSchema,
|
|
3431
|
+
IGNORED_DIRECTORIES,
|
|
3432
|
+
IfElseStepSchema,
|
|
3433
|
+
ListSkillsInputSchema,
|
|
3434
|
+
ListSkillsOutputSchema,
|
|
3435
|
+
LoadSkillInputSchema,
|
|
3436
|
+
LoadSkillOutputSchema,
|
|
2938
3437
|
MockProvider,
|
|
3438
|
+
NodeFileSystemProvider,
|
|
3439
|
+
ReadSkillFileInputSchema,
|
|
3440
|
+
ReadSkillFileOutputSchema,
|
|
3441
|
+
SKILL_ERROR_MESSAGES,
|
|
3442
|
+
SKILL_LIMITS,
|
|
3443
|
+
SOURCE_ICONS,
|
|
3444
|
+
SUSPICIOUS_PATTERNS,
|
|
3445
|
+
SkillDiscoveryError,
|
|
3446
|
+
SkillDiscoveryService,
|
|
3447
|
+
SkillValidationError,
|
|
2939
3448
|
TOOL_GROUPS,
|
|
2940
3449
|
TaskEventKind,
|
|
2941
3450
|
TodoItemSchema,
|
|
2942
3451
|
TodoStatus,
|
|
3452
|
+
TryCatchStepSchema,
|
|
2943
3453
|
UpdateTodoItemInputSchema,
|
|
2944
3454
|
UpdateTodoItemOutputSchema,
|
|
2945
3455
|
UsageMeter,
|
|
3456
|
+
WhileLoopStepSchema,
|
|
3457
|
+
WorkflowControlFlowStepSchema,
|
|
2946
3458
|
WorkflowDefinitionSchema,
|
|
2947
3459
|
WorkflowFileSchema,
|
|
2948
3460
|
WorkflowInputDefinitionSchema,
|
|
@@ -2956,25 +3468,36 @@ export {
|
|
|
2956
3468
|
executeCommand_default as executeCommand,
|
|
2957
3469
|
fetchUrl_default as fetchUrl,
|
|
2958
3470
|
fromJsonModelMessage,
|
|
2959
|
-
|
|
2960
|
-
generateWorkflowDefinitionWorkflow,
|
|
3471
|
+
getSkillStats,
|
|
2961
3472
|
listFiles_default as listFiles,
|
|
3473
|
+
listSkills,
|
|
3474
|
+
listSkillsToolInfo,
|
|
3475
|
+
loadSkill,
|
|
3476
|
+
loadSkillToolInfo,
|
|
2962
3477
|
makeStepFn,
|
|
3478
|
+
mcpServerConfigSchema,
|
|
2963
3479
|
parseDynamicWorkflowDefinition,
|
|
2964
3480
|
parseJsonFromMarkdown,
|
|
3481
|
+
providerConfigSchema,
|
|
2965
3482
|
providerModelSchema,
|
|
2966
3483
|
readBinaryFile_default as readBinaryFile,
|
|
2967
3484
|
readFile_default as readFile,
|
|
3485
|
+
readSkillFile,
|
|
3486
|
+
readSkillFileToolInfo,
|
|
2968
3487
|
removeFile_default as removeFile,
|
|
2969
3488
|
renameFile_default as renameFile,
|
|
2970
3489
|
replaceInFile_default as replaceInFile,
|
|
2971
3490
|
replaceInFile as replaceInFileHelper,
|
|
2972
3491
|
responsePrompts,
|
|
2973
3492
|
ruleSchema,
|
|
3493
|
+
scriptSchema,
|
|
2974
3494
|
search_default as search,
|
|
2975
3495
|
searchFiles_default as searchFiles,
|
|
3496
|
+
skillMetadataSchema,
|
|
2976
3497
|
toJsonModelMessage,
|
|
2977
|
-
|
|
2978
|
-
|
|
3498
|
+
validateSkillMetadata,
|
|
3499
|
+
validateSkillReferences,
|
|
3500
|
+
validateSkillSecurity,
|
|
3501
|
+
validateWorkflowFile,
|
|
2979
3502
|
writeToFile_default as writeToFile
|
|
2980
3503
|
};
|
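Note: the export-list changes above are the visible API delta of this release. The diff does not show signatures, so only an import of a few newly exported names is sketched here:

```ts
import {
  SkillDiscoveryService,
  listSkills,
  loadSkill,
  readSkillFile,
  providerConfigSchema,
  scriptSchema,
  validateWorkflowFile,
} from '@polka-codes/core'
```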