@mastra/agent-builder 0.0.0-experimental-agent-builder-20250815195917 → 0.0.1-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. package/CHANGELOG.md +22 -16
  2. package/README.md +4 -17
  3. package/dist/agent/index.d.ts +5885 -0
  4. package/dist/agent/index.d.ts.map +1 -0
  5. package/dist/defaults.d.ts +6529 -0
  6. package/dist/defaults.d.ts.map +1 -0
  7. package/dist/index.d.ts +4 -1
  8. package/dist/index.d.ts.map +1 -0
  9. package/dist/index.js +2943 -591
  10. package/dist/index.js.map +1 -0
  11. package/dist/processors/tool-summary.d.ts +29 -0
  12. package/dist/processors/tool-summary.d.ts.map +1 -0
  13. package/dist/processors/write-file.d.ts +10 -0
  14. package/dist/processors/write-file.d.ts.map +1 -0
  15. package/dist/types.d.ts +1121 -0
  16. package/dist/types.d.ts.map +1 -0
  17. package/dist/utils.d.ts +63 -0
  18. package/dist/utils.d.ts.map +1 -0
  19. package/dist/workflows/index.d.ts +5 -0
  20. package/dist/workflows/index.d.ts.map +1 -0
  21. package/dist/workflows/shared/schema.d.ts +139 -0
  22. package/dist/workflows/shared/schema.d.ts.map +1 -0
  23. package/dist/workflows/task-planning/prompts.d.ts +37 -0
  24. package/dist/workflows/task-planning/prompts.d.ts.map +1 -0
  25. package/dist/workflows/task-planning/schema.d.ts +548 -0
  26. package/dist/workflows/task-planning/schema.d.ts.map +1 -0
  27. package/dist/workflows/task-planning/task-planning.d.ts +992 -0
  28. package/dist/workflows/task-planning/task-planning.d.ts.map +1 -0
  29. package/dist/workflows/template-builder/template-builder.d.ts +1910 -0
  30. package/dist/workflows/template-builder/template-builder.d.ts.map +1 -0
  31. package/dist/workflows/workflow-builder/prompts.d.ts +44 -0
  32. package/dist/workflows/workflow-builder/prompts.d.ts.map +1 -0
  33. package/dist/workflows/workflow-builder/schema.d.ts +1170 -0
  34. package/dist/workflows/workflow-builder/schema.d.ts.map +1 -0
  35. package/dist/workflows/workflow-builder/tools.d.ts +309 -0
  36. package/dist/workflows/workflow-builder/tools.d.ts.map +1 -0
  37. package/dist/workflows/workflow-builder/workflow-builder.d.ts +2714 -0
  38. package/dist/workflows/workflow-builder/workflow-builder.d.ts.map +1 -0
  39. package/dist/workflows/workflow-map.d.ts +3735 -0
  40. package/dist/workflows/workflow-map.d.ts.map +1 -0
  41. package/package.json +21 -9
  42. package/dist/_tsup-dts-rollup.d.cts +0 -13109
  43. package/dist/_tsup-dts-rollup.d.ts +0 -13109
  44. package/dist/index.cjs +0 -3772
  45. package/dist/index.d.cts +0 -1
  46. package/eslint.config.js +0 -11
  47. package/integration-tests/CHANGELOG.md +0 -20
  48. package/integration-tests/README.md +0 -154
  49. package/integration-tests/docker-compose.yml +0 -39
  50. package/integration-tests/package.json +0 -38
  51. package/integration-tests/src/agent-template-behavior.test.ts +0 -103
  52. package/integration-tests/src/fixtures/minimal-mastra-project/env.example +0 -6
  53. package/integration-tests/src/fixtures/minimal-mastra-project/package.json +0 -17
  54. package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/agents/weather.ts +0 -34
  55. package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/index.ts +0 -15
  56. package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/mcp/index.ts +0 -46
  57. package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/tools/weather.ts +0 -13
  58. package/integration-tests/src/fixtures/minimal-mastra-project/tsconfig.json +0 -17
  59. package/integration-tests/src/template-integration.test.ts +0 -312
  60. package/integration-tests/tsconfig.json +0 -13
  61. package/integration-tests/vitest.config.ts +0 -17
  62. package/src/agent-builder.test.ts +0 -291
  63. package/src/defaults.ts +0 -2728
  64. package/src/index.ts +0 -187
  65. package/src/processors/tool-summary.ts +0 -136
  66. package/src/processors/write-file.ts +0 -17
  67. package/src/types.ts +0 -120
  68. package/src/utils.ts +0 -133
  69. package/src/workflows/index.ts +0 -1541
  70. package/tsconfig.json +0 -5
  71. package/vitest.config.ts +0 -11
package/dist/index.js CHANGED
@@ -1,21 +1,23 @@
1
1
  import { Agent } from '@mastra/core/agent';
2
2
  import { Memory } from '@mastra/memory';
3
3
  import { TokenLimiter } from '@mastra/memory/processors';
4
- import { exec as exec$1, spawn as spawn$1 } from 'child_process';
5
- import { mkdtemp, rm, readFile, mkdir, copyFile, writeFile, stat, readdir } from 'fs/promises';
6
- import { join, resolve, dirname, extname, basename, isAbsolute, relative } from 'path';
4
+ import { exec as exec$1, execFile as execFile$1, spawn as spawn$1 } from 'child_process';
5
+ import { mkdtemp, rm, readFile, writeFile, readdir, mkdir, copyFile, stat } from 'fs/promises';
6
+ import { join, resolve, basename, extname, dirname, isAbsolute, relative } from 'path';
7
7
  import { createTool } from '@mastra/core/tools';
8
- import { MCPClient } from '@mastra/mcp';
8
+ import ignore from 'ignore';
9
9
  import { z } from 'zod';
10
- import { existsSync } from 'fs';
10
+ import { existsSync, readFileSync } from 'fs';
11
11
  import { createRequire } from 'module';
12
12
  import { promisify } from 'util';
13
- import { MemoryProcessor, Agent as Agent$1 } from '@mastra/core';
13
+ import { openai as openai$1 } from '@ai-sdk/openai_v5';
14
+ import { createStep as createStep$1, Agent as Agent$1, createWorkflow as createWorkflow$1, createTool as createTool$1, MemoryProcessor } from '@mastra/core';
14
15
  import { tmpdir } from 'os';
15
16
  import { openai } from '@ai-sdk/openai';
16
17
  import { createStep, createWorkflow } from '@mastra/core/workflows';
18
+ import { stepCountIs } from 'ai';
17
19
 
18
- // src/index.ts
20
+ // src/agent/index.ts
19
21
  var UNIT_KINDS = ["mcp-server", "tool", "workflow", "agent", "integration", "network", "other"];
20
22
  var TemplateUnitSchema = z.object({
21
23
  kind: z.enum(UNIT_KINDS),
@@ -28,7 +30,7 @@ z.object({
28
30
  description: z.string().optional(),
29
31
  units: z.array(TemplateUnitSchema)
30
32
  });
31
- var MergeInputSchema = z.object({
33
+ var AgentBuilderInputSchema = z.object({
32
34
  repo: z.string().describe("Git URL or local path of the template repo"),
33
35
  ref: z.string().optional().describe("Tag/branch/commit to checkout (defaults to main/master)"),
34
36
  slug: z.string().optional().describe("Slug for branch/scripts; defaults to inferred from repo"),
@@ -40,51 +42,342 @@ z.object({
40
42
  templateDir: z.string(),
41
43
  units: z.array(TemplateUnitSchema)
42
44
  });
45
+ var CopiedFileSchema = z.object({
46
+ source: z.string(),
47
+ destination: z.string(),
48
+ unit: z.object({
49
+ kind: z.enum(UNIT_KINDS),
50
+ id: z.string()
51
+ })
52
+ });
53
+ var ConflictSchema = z.object({
54
+ unit: z.object({
55
+ kind: z.enum(UNIT_KINDS),
56
+ id: z.string()
57
+ }),
58
+ issue: z.string(),
59
+ sourceFile: z.string(),
60
+ targetFile: z.string()
61
+ });
62
+ var FileCopyInputSchema = z.object({
63
+ orderedUnits: z.array(TemplateUnitSchema),
64
+ templateDir: z.string(),
65
+ commitSha: z.string(),
66
+ slug: z.string(),
67
+ targetPath: z.string().optional()
68
+ });
69
+ var FileCopyResultSchema = z.object({
70
+ success: z.boolean(),
71
+ copiedFiles: z.array(CopiedFileSchema),
72
+ conflicts: z.array(ConflictSchema),
73
+ message: z.string(),
74
+ error: z.string().optional()
75
+ });
76
+ var ConflictResolutionSchema = z.object({
77
+ unit: z.object({
78
+ kind: z.enum(UNIT_KINDS),
79
+ id: z.string()
80
+ }),
81
+ issue: z.string(),
82
+ resolution: z.string()
83
+ });
84
+ var IntelligentMergeInputSchema = z.object({
85
+ conflicts: z.array(ConflictSchema),
86
+ copiedFiles: z.array(CopiedFileSchema),
87
+ templateDir: z.string(),
88
+ commitSha: z.string(),
89
+ slug: z.string(),
90
+ targetPath: z.string().optional(),
91
+ branchName: z.string().optional()
92
+ });
93
+ var IntelligentMergeResultSchema = z.object({
94
+ success: z.boolean(),
95
+ applied: z.boolean(),
96
+ message: z.string(),
97
+ conflictsResolved: z.array(ConflictResolutionSchema),
98
+ error: z.string().optional()
99
+ });
100
+ var ValidationResultsSchema = z.object({
101
+ valid: z.boolean(),
102
+ errorsFixed: z.number(),
103
+ remainingErrors: z.number()
104
+ });
105
+ var ValidationFixInputSchema = z.object({
106
+ commitSha: z.string(),
107
+ slug: z.string(),
108
+ targetPath: z.string().optional(),
109
+ templateDir: z.string(),
110
+ orderedUnits: z.array(TemplateUnitSchema),
111
+ copiedFiles: z.array(CopiedFileSchema),
112
+ conflictsResolved: z.array(ConflictResolutionSchema).optional(),
113
+ maxIterations: z.number().optional().default(5)
114
+ });
115
+ var ValidationFixResultSchema = z.object({
116
+ success: z.boolean(),
117
+ applied: z.boolean(),
118
+ message: z.string(),
119
+ validationResults: ValidationResultsSchema,
120
+ error: z.string().optional()
121
+ });
43
122
  var ApplyResultSchema = z.object({
44
123
  success: z.boolean(),
45
124
  applied: z.boolean(),
46
125
  branchName: z.string().optional(),
126
+ message: z.string(),
127
+ validationResults: ValidationResultsSchema.optional(),
128
+ error: z.string().optional(),
129
+ errors: z.array(z.string()).optional(),
130
+ stepResults: z.object({
131
+ cloneSuccess: z.boolean().optional(),
132
+ analyzeSuccess: z.boolean().optional(),
133
+ discoverSuccess: z.boolean().optional(),
134
+ orderSuccess: z.boolean().optional(),
135
+ prepareBranchSuccess: z.boolean().optional(),
136
+ packageMergeSuccess: z.boolean().optional(),
137
+ installSuccess: z.boolean().optional(),
138
+ copySuccess: z.boolean().optional(),
139
+ mergeSuccess: z.boolean().optional(),
140
+ validationSuccess: z.boolean().optional(),
141
+ filesCopied: z.number(),
142
+ conflictsSkipped: z.number(),
143
+ conflictsResolved: z.number()
144
+ }).optional()
145
+ });
146
+ var CloneTemplateResultSchema = z.object({
147
+ templateDir: z.string(),
148
+ commitSha: z.string(),
149
+ slug: z.string(),
150
+ success: z.boolean().optional(),
151
+ error: z.string().optional()
152
+ });
153
+ var PackageAnalysisSchema = z.object({
154
+ name: z.string().optional(),
155
+ version: z.string().optional(),
156
+ description: z.string().optional(),
157
+ dependencies: z.record(z.string()).optional(),
158
+ devDependencies: z.record(z.string()).optional(),
159
+ peerDependencies: z.record(z.string()).optional(),
160
+ scripts: z.record(z.string()).optional(),
161
+ success: z.boolean().optional(),
162
+ error: z.string().optional()
163
+ });
164
+ var DiscoveryResultSchema = z.object({
165
+ units: z.array(TemplateUnitSchema),
166
+ success: z.boolean().optional(),
167
+ error: z.string().optional()
168
+ });
169
+ var OrderedUnitsSchema = z.object({
170
+ orderedUnits: z.array(TemplateUnitSchema),
171
+ success: z.boolean().optional(),
172
+ error: z.string().optional()
173
+ });
174
+ var PackageMergeInputSchema = z.object({
175
+ commitSha: z.string(),
176
+ slug: z.string(),
177
+ targetPath: z.string().optional(),
178
+ packageInfo: PackageAnalysisSchema
179
+ });
180
+ var PackageMergeResultSchema = z.object({
181
+ success: z.boolean(),
182
+ applied: z.boolean(),
183
+ message: z.string(),
184
+ error: z.string().optional()
185
+ });
186
+ var InstallInputSchema = z.object({
187
+ targetPath: z.string().describe("Path to the project to install packages in")
188
+ });
189
+ var InstallResultSchema = z.object({
190
+ success: z.boolean(),
191
+ error: z.string().optional()
192
+ });
193
+ var PrepareBranchInputSchema = z.object({
194
+ slug: z.string(),
195
+ commitSha: z.string().optional(),
196
+ // from clone-template if relevant
197
+ targetPath: z.string().optional()
198
+ });
199
+ var PrepareBranchResultSchema = z.object({
200
+ branchName: z.string(),
201
+ success: z.boolean().optional(),
47
202
  error: z.string().optional()
48
203
  });
49
204
 
50
205
  // src/utils.ts
51
206
  var exec = promisify(exec$1);
207
+ var execFile = promisify(execFile$1);
208
+ function isInWorkspaceSubfolder(cwd) {
209
+ try {
210
+ const currentPackageJson = resolve(cwd, "package.json");
211
+ if (!existsSync(currentPackageJson)) {
212
+ return false;
213
+ }
214
+ let currentDir = cwd;
215
+ let previousDir = "";
216
+ while (currentDir !== previousDir && currentDir !== "/") {
217
+ previousDir = currentDir;
218
+ currentDir = dirname(currentDir);
219
+ if (currentDir === cwd) {
220
+ continue;
221
+ }
222
+ console.log(`Checking for workspace indicators in: ${currentDir}`);
223
+ if (existsSync(resolve(currentDir, "pnpm-workspace.yaml"))) {
224
+ return true;
225
+ }
226
+ const parentPackageJson = resolve(currentDir, "package.json");
227
+ if (existsSync(parentPackageJson)) {
228
+ try {
229
+ const parentPkg = JSON.parse(readFileSync(parentPackageJson, "utf-8"));
230
+ if (parentPkg.workspaces) {
231
+ return true;
232
+ }
233
+ } catch {
234
+ }
235
+ }
236
+ if (existsSync(resolve(currentDir, "lerna.json"))) {
237
+ return true;
238
+ }
239
+ }
240
+ return false;
241
+ } catch (error) {
242
+ console.log(`Error in workspace detection: ${error}`);
243
+ return false;
244
+ }
245
+ }
52
246
  function spawn(command, args, options) {
53
247
  return new Promise((resolve4, reject) => {
54
248
  const childProcess = spawn$1(command, args, {
55
- // stdio: 'inherit',
249
+ stdio: "inherit",
250
+ // Enable proper stdio handling
56
251
  ...options
57
252
  });
58
253
  childProcess.on("error", (error) => {
59
254
  reject(error);
60
255
  });
256
+ childProcess.on("close", (code) => {
257
+ if (code === 0) {
258
+ resolve4(void 0);
259
+ } else {
260
+ reject(new Error(`Command failed with exit code ${code}`));
261
+ }
262
+ });
263
+ });
264
+ }
265
+ async function isGitInstalled() {
266
+ try {
267
+ await spawnWithOutput("git", ["--version"], {});
268
+ return true;
269
+ } catch {
270
+ return false;
271
+ }
272
+ }
273
+ async function isInsideGitRepo(cwd) {
274
+ try {
275
+ if (!await isGitInstalled()) return false;
276
+ const { stdout } = await spawnWithOutput("git", ["rev-parse", "--is-inside-work-tree"], { cwd });
277
+ return stdout.trim() === "true";
278
+ } catch {
279
+ return false;
280
+ }
281
+ }
282
+ function spawnWithOutput(command, args, options) {
283
+ return new Promise((resolvePromise, rejectPromise) => {
284
+ const childProcess = spawn$1(command, args, {
285
+ ...options
286
+ });
287
+ let stdout = "";
61
288
  let stderr = "";
62
- childProcess.stderr?.on("data", (message) => {
63
- stderr += message;
289
+ childProcess.on("error", (error) => {
290
+ rejectPromise(error);
291
+ });
292
+ childProcess.stdout?.on("data", (chunk) => {
293
+ process.stdout.write(chunk);
294
+ stdout += chunk?.toString?.() ?? String(chunk);
295
+ });
296
+ childProcess.stderr?.on("data", (chunk) => {
297
+ stderr += chunk?.toString?.() ?? String(chunk);
298
+ process.stderr.write(chunk);
64
299
  });
65
300
  childProcess.on("close", (code) => {
66
301
  if (code === 0) {
67
- resolve4(void 0);
302
+ resolvePromise({ stdout, stderr, code: code ?? 0 });
68
303
  } else {
69
- reject(new Error(stderr));
304
+ const err = new Error(stderr || `Command failed: ${command} ${args.join(" ")}`);
305
+ err.code = code;
306
+ rejectPromise(err);
70
307
  }
71
308
  });
72
309
  });
73
310
  }
74
311
  async function spawnSWPM(cwd, command, packageNames) {
75
- await spawn(createRequire(import.meta.filename).resolve("swpm"), [command, ...packageNames], {
76
- cwd
77
- });
312
+ try {
313
+ console.log("Running install command with swpm");
314
+ const swpmPath = createRequire(import.meta.filename).resolve("swpm");
315
+ await spawn(swpmPath, [command, ...packageNames], { cwd });
316
+ return;
317
+ } catch (e) {
318
+ console.log("Failed to run install command with swpm", e);
319
+ }
320
+ try {
321
+ let packageManager;
322
+ if (existsSync(resolve(cwd, "pnpm-lock.yaml"))) {
323
+ packageManager = "pnpm";
324
+ } else if (existsSync(resolve(cwd, "yarn.lock"))) {
325
+ packageManager = "yarn";
326
+ } else {
327
+ packageManager = "npm";
328
+ }
329
+ let nativeCommand = command === "add" ? "add" : command === "install" ? "install" : command;
330
+ const args = [nativeCommand];
331
+ if (nativeCommand === "install") {
332
+ const inWorkspace = isInWorkspaceSubfolder(cwd);
333
+ if (packageManager === "pnpm") {
334
+ args.push("--force");
335
+ if (inWorkspace) {
336
+ args.push("--ignore-workspace");
337
+ }
338
+ } else if (packageManager === "npm") {
339
+ args.push("--yes");
340
+ if (inWorkspace) {
341
+ args.push("--ignore-workspaces");
342
+ }
343
+ }
344
+ }
345
+ args.push(...packageNames);
346
+ console.log(`Falling back to ${packageManager} ${args.join(" ")}`);
347
+ await spawn(packageManager, args, { cwd });
348
+ return;
349
+ } catch (e) {
350
+ console.log(`Failed to run install command with native package manager: ${e}`);
351
+ }
352
+ throw new Error(`Failed to run install command with swpm and native package managers`);
78
353
  }
79
354
  function kindWeight(kind) {
80
355
  const idx = UNIT_KINDS.indexOf(kind);
81
356
  return idx === -1 ? UNIT_KINDS.length : idx;
82
357
  }
358
+ async function fetchMastraTemplates() {
359
+ try {
360
+ const response = await fetch("https://mastra.ai/api/templates.json");
361
+ const data = await response.json();
362
+ return data;
363
+ } catch (error) {
364
+ throw new Error(`Failed to fetch Mastra templates: ${error instanceof Error ? error.message : String(error)}`);
365
+ }
366
+ }
367
+ async function getMastraTemplate(slug) {
368
+ const templates = await fetchMastraTemplates();
369
+ const template = templates.find((t) => t.slug === slug);
370
+ if (!template) {
371
+ throw new Error(`Template "${slug}" not found. Available templates: ${templates.map((t) => t.slug).join(", ")}`);
372
+ }
373
+ return template;
374
+ }
83
375
  async function logGitState(targetPath, label) {
84
376
  try {
85
- const gitStatusResult = await exec("git status --porcelain", { cwd: targetPath });
86
- const gitLogResult = await exec("git log --oneline -3", { cwd: targetPath });
87
- const gitCountResult = await exec("git rev-list --count HEAD", { cwd: targetPath });
377
+ if (!await isInsideGitRepo(targetPath)) return;
378
+ const gitStatusResult = await git(targetPath, "status", "--porcelain");
379
+ const gitLogResult = await git(targetPath, "log", "--oneline", "-3");
380
+ const gitCountResult = await git(targetPath, "rev-list", "--count", "HEAD");
88
381
  console.log(`\u{1F4CA} Git state ${label}:`);
89
382
  console.log("Status:", gitStatusResult.stdout.trim() || "Clean working directory");
90
383
  console.log("Recent commits:", gitLogResult.stdout.trim());
@@ -93,6 +386,91 @@ async function logGitState(targetPath, label) {
93
386
  console.warn(`Could not get git state ${label}:`, gitError);
94
387
  }
95
388
  }
389
+ async function git(cwd, ...args) {
390
+ const { stdout, stderr } = await spawnWithOutput("git", args, { cwd });
391
+ return { stdout: stdout ?? "", stderr: stderr ?? "" };
392
+ }
393
+ async function gitClone(repo, destDir, cwd) {
394
+ await git(process.cwd(), "clone", repo, destDir);
395
+ }
396
+ async function gitCheckoutRef(cwd, ref) {
397
+ if (!await isInsideGitRepo(cwd)) return;
398
+ await git(cwd, "checkout", ref);
399
+ }
400
+ async function gitRevParse(cwd, rev) {
401
+ if (!await isInsideGitRepo(cwd)) return "";
402
+ const { stdout } = await git(cwd, "rev-parse", rev);
403
+ return stdout.trim();
404
+ }
405
+ async function gitAddFiles(cwd, files) {
406
+ if (!files || files.length === 0) return;
407
+ if (!await isInsideGitRepo(cwd)) return;
408
+ await git(cwd, "add", ...files);
409
+ }
410
+ async function gitAddAll(cwd) {
411
+ if (!await isInsideGitRepo(cwd)) return;
412
+ await git(cwd, "add", ".");
413
+ }
414
+ async function gitHasStagedChanges(cwd) {
415
+ if (!await isInsideGitRepo(cwd)) return false;
416
+ const { stdout } = await git(cwd, "diff", "--cached", "--name-only");
417
+ return stdout.trim().length > 0;
418
+ }
419
+ async function gitCommit(cwd, message, opts) {
420
+ try {
421
+ if (!await isInsideGitRepo(cwd)) return false;
422
+ if (opts?.skipIfNoStaged) {
423
+ const has = await gitHasStagedChanges(cwd);
424
+ if (!has) return false;
425
+ }
426
+ const args = ["commit", "-m", message];
427
+ if (opts?.allowEmpty) args.push("--allow-empty");
428
+ await git(cwd, ...args);
429
+ return true;
430
+ } catch (e) {
431
+ const msg = e instanceof Error ? e.message : String(e);
432
+ if (/nothing to commit/i.test(msg) || /no changes added to commit/i.test(msg)) {
433
+ return false;
434
+ }
435
+ throw e;
436
+ }
437
+ }
438
+ async function gitAddAndCommit(cwd, message, files, opts) {
439
+ try {
440
+ if (!await isInsideGitRepo(cwd)) return false;
441
+ if (files && files.length > 0) {
442
+ await gitAddFiles(cwd, files);
443
+ } else {
444
+ await gitAddAll(cwd);
445
+ }
446
+ return gitCommit(cwd, message, opts);
447
+ } catch (e) {
448
+ console.error(`Failed to add and commit files: ${e instanceof Error ? e.message : String(e)}`);
449
+ return false;
450
+ }
451
+ }
452
+ async function gitCheckoutBranch(branchName, targetPath) {
453
+ try {
454
+ if (!await isInsideGitRepo(targetPath)) return;
455
+ await git(targetPath, "checkout", "-b", branchName);
456
+ console.log(`Created new branch: ${branchName}`);
457
+ } catch (error) {
458
+ const errorStr = error instanceof Error ? error.message : String(error);
459
+ if (errorStr.includes("already exists")) {
460
+ try {
461
+ await git(targetPath, "checkout", branchName);
462
+ console.log(`Switched to existing branch: ${branchName}`);
463
+ } catch {
464
+ const timestamp = Date.now().toString().slice(-6);
465
+ const uniqueBranchName = `${branchName}-${timestamp}`;
466
+ await git(targetPath, "checkout", "-b", uniqueBranchName);
467
+ console.log(`Created unique branch: ${uniqueBranchName}`);
468
+ }
469
+ } else {
470
+ throw error;
471
+ }
472
+ }
473
+ }
96
474
  async function backupAndReplaceFile(sourceFile, targetFile) {
97
475
  const backupFile = `${targetFile}.backup-${Date.now()}`;
98
476
  await copyFile(targetFile, backupFile);
@@ -115,6 +493,42 @@ async function renameAndCopyFile(sourceFile, targetFile) {
115
493
  console.log(`\u{1F4DD} Copied with unique name: ${basename(uniqueTargetFile)}`);
116
494
  return uniqueTargetFile;
117
495
  }
496
+ var resolveModel = (runtimeContext) => {
497
+ const modelFromContext = runtimeContext.get("model");
498
+ if (modelFromContext) {
499
+ console.log(`Using model: ${modelFromContext}`);
500
+ if (isValidMastraLanguageModel(modelFromContext)) {
501
+ return modelFromContext;
502
+ }
503
+ throw new Error(
504
+ 'Invalid model provided. Model must be a MastraLanguageModel instance (e.g., openai("gpt-4"), anthropic("claude-3-5-sonnet"), etc.)'
505
+ );
506
+ }
507
+ return openai$1("gpt-4.1");
508
+ };
509
+ var isValidMastraLanguageModel = (model) => {
510
+ return model && typeof model === "object" && typeof model.modelId === "string" && typeof model.generate === "function";
511
+ };
512
+ var resolveTargetPath = (inputData, runtimeContext) => {
513
+ if (inputData.targetPath) {
514
+ return inputData.targetPath;
515
+ }
516
+ const contextPath = runtimeContext.get("targetPath");
517
+ if (contextPath) {
518
+ return contextPath;
519
+ }
520
+ const envRoot = process.env.MASTRA_PROJECT_ROOT?.trim();
521
+ if (envRoot) {
522
+ return envRoot;
523
+ }
524
+ const cwd = process.cwd();
525
+ const parent = dirname(cwd);
526
+ const grand = dirname(parent);
527
+ if (basename(cwd) === "output" && basename(parent) === ".mastra") {
528
+ return grand;
529
+ }
530
+ return cwd;
531
+ };
118
532
 
119
533
  // src/defaults.ts
120
534
  var AgentBuilderDefaults = class _AgentBuilderDefaults {
@@ -503,26 +917,6 @@ export const mastra = new Mastra({
503
917
  });
504
918
  \`\`\`
505
919
 
506
- ### MCPClient
507
- \`\`\`
508
- // ./src/mcp/client.ts
509
-
510
- import { MCPClient } from '@mastra/mcp-client';
511
-
512
- // leverage existing MCP servers, or create your own
513
- export const mcpClient = new MCPClient({
514
- id: 'example-mcp-client',
515
- servers: {
516
- some-mcp-server: {
517
- command: 'npx',
518
- args: ["some-mcp-server"],
519
- },
520
- },
521
- });
522
-
523
- export const tools = await mcpClient.getTools();
524
- \`\`\`
525
-
526
920
  </examples>`;
527
921
  static DEFAULT_MEMORY_CONFIG = {
528
922
  lastMessages: 20
@@ -535,28 +929,7 @@ export const tools = await mcpClient.getTools();
535
929
  network: "src/mastra/networks"
536
930
  };
537
931
  static DEFAULT_TOOLS = async (projectPath, mode = "code-editor") => {
538
- const mcpClient = new MCPClient({
539
- id: "agent-builder-mcp-client",
540
- servers: {
541
- // web: {
542
- // command: 'node',
543
- // args: ['/Users/daniellew/Documents/Mastra/web-search/build/index.js'],
544
- // },
545
- docs: {
546
- command: "npx",
547
- args: ["-y", "@mastra/mcp-docs-server"]
548
- }
549
- }
550
- });
551
- const tools = await mcpClient.getTools();
552
- const filteredTools = {};
553
- Object.keys(tools).forEach((key) => {
554
- if (!key.includes("MastraCourse")) {
555
- filteredTools[key] = tools[key];
556
- }
557
- });
558
932
  const agentBuilderTools = {
559
- ...filteredTools,
560
933
  readFile: createTool({
561
934
  id: "read-file",
562
935
  description: "Read contents of a file with optional line range selection.",
@@ -609,7 +982,7 @@ export const tools = await mcpClient.getTools();
609
982
  path: z.string().describe("Directory path to list"),
610
983
  recursive: z.boolean().default(false).describe("List subdirectories recursively"),
611
984
  includeHidden: z.boolean().default(false).describe("Include hidden files and directories"),
612
- pattern: z.string().optional().describe("Glob pattern to filter files"),
985
+ pattern: z.string().default("*").describe("Glob pattern to filter files"),
613
986
  maxDepth: z.number().default(10).describe("Maximum recursion depth"),
614
987
  includeMetadata: z.boolean().default(true).describe("Include file metadata")
615
988
  }),
@@ -735,6 +1108,60 @@ export const tools = await mcpClient.getTools();
735
1108
  return await _AgentBuilderDefaults.performMultiEdit({ ...context, projectPath });
736
1109
  }
737
1110
  }),
1111
+ replaceLines: createTool({
1112
+ id: "replace-lines",
1113
+ description: "Replace specific line ranges in files with new content. IMPORTANT: This tool replaces ENTIRE lines, not partial content within lines. Lines are 1-indexed.",
1114
+ inputSchema: z.object({
1115
+ filePath: z.string().describe("Path to the file to edit"),
1116
+ startLine: z.number().describe("Starting line number to replace (1-indexed, inclusive). Count from the first line = 1"),
1117
+ endLine: z.number().describe(
1118
+ "Ending line number to replace (1-indexed, inclusive). To replace single line, use same number as startLine"
1119
+ ),
1120
+ newContent: z.string().describe(
1121
+ 'New content to replace the lines with. Use empty string "" to delete lines completely. For multiline content, include \\n characters'
1122
+ ),
1123
+ createBackup: z.boolean().default(false).describe("Create backup file before editing")
1124
+ }),
1125
+ outputSchema: z.object({
1126
+ success: z.boolean(),
1127
+ message: z.string(),
1128
+ linesReplaced: z.number().optional(),
1129
+ backup: z.string().optional(),
1130
+ error: z.string().optional()
1131
+ }),
1132
+ execute: async ({ context }) => {
1133
+ return await _AgentBuilderDefaults.replaceLines({ ...context, projectPath });
1134
+ }
1135
+ }),
1136
+ // File diagnostics tool to help debug line replacement issues
1137
+ showFileLines: createTool({
1138
+ id: "show-file-lines",
1139
+ description: "Show specific lines from a file with line numbers. Useful for debugging before using replaceLines.",
1140
+ inputSchema: z.object({
1141
+ filePath: z.string().describe("Path to the file to examine"),
1142
+ startLine: z.number().optional().describe("Starting line number to show (1-indexed). If not provided, shows all lines"),
1143
+ endLine: z.number().optional().describe(
1144
+ "Ending line number to show (1-indexed, inclusive). If not provided but startLine is, shows only that line"
1145
+ ),
1146
+ context: z.number().default(2).describe("Number of context lines to show before and after the range")
1147
+ }),
1148
+ outputSchema: z.object({
1149
+ success: z.boolean(),
1150
+ lines: z.array(
1151
+ z.object({
1152
+ lineNumber: z.number(),
1153
+ content: z.string(),
1154
+ isTarget: z.boolean().describe("Whether this line is in the target range")
1155
+ })
1156
+ ),
1157
+ totalLines: z.number(),
1158
+ message: z.string(),
1159
+ error: z.string().optional()
1160
+ }),
1161
+ execute: async ({ context }) => {
1162
+ return await _AgentBuilderDefaults.showFileLines({ ...context, projectPath });
1163
+ }
1164
+ }),
738
1165
  // Interactive Communication
739
1166
  askClarification: createTool({
740
1167
  id: "ask-clarification",
@@ -807,16 +1234,18 @@ export const tools = await mcpClient.getTools();
807
1234
  })
808
1235
  }),
809
1236
  execute: async ({ context }) => {
810
- return await _AgentBuilderDefaults.performSmartSearch(context);
1237
+ return await _AgentBuilderDefaults.performSmartSearch(context, projectPath);
811
1238
  }
812
1239
  }),
813
1240
  validateCode: createTool({
814
1241
  id: "validate-code",
815
- description: "Validates generated code through TypeScript compilation, ESLint, schema validation, and other checks",
1242
+ description: "Validates code using a fast hybrid approach: syntax \u2192 semantic \u2192 lint. RECOMMENDED: Always provide specific files for optimal performance and accuracy.",
816
1243
  inputSchema: z.object({
817
1244
  projectPath: z.string().optional().describe("Path to the project to validate (defaults to current project)"),
818
- validationType: z.array(z.enum(["types", "lint", "schemas", "tests", "build"])).describe("Types of validation to perform"),
819
- files: z.array(z.string()).optional().describe("Specific files to validate (if not provided, validates entire project)")
1245
+ validationType: z.array(z.enum(["types", "lint", "schemas", "tests", "build"])).describe('Types of validation to perform. Recommended: ["types", "lint"] for code quality'),
1246
+ files: z.array(z.string()).optional().describe(
1247
+ "RECOMMENDED: Specific files to validate (e.g., files you created/modified). Uses hybrid validation: fast syntax check \u2192 semantic types \u2192 ESLint. Without files, falls back to slower CLI validation."
1248
+ )
820
1249
  }),
821
1250
  outputSchema: z.object({
822
1251
  valid: z.boolean(),
@@ -1178,12 +1607,12 @@ export const tools = await mcpClient.getTools();
1178
1607
  */
1179
1608
  static async createMastraProject({ features, projectName }) {
1180
1609
  try {
1181
- const args = ["pnpx", "create", "mastra@latest", projectName ?? "", "-l", "openai", "-k", "skip"];
1610
+ const args = ["pnpx", "create-mastra@latest", projectName?.replace(/[;&|`$(){}\[\]]/g, "") ?? "", "-l", "openai"];
1182
1611
  if (features && features.length > 0) {
1183
1612
  args.push("--components", features.join(","));
1184
1613
  }
1185
1614
  args.push("--example");
1186
- const { stdout, stderr } = await exec(args.join(" "));
1615
+ const { stdout, stderr } = await spawnWithOutput(args[0], args.slice(1), {});
1187
1616
  return {
1188
1617
  success: true,
1189
1618
  projectPath: `./${projectName}`,
@@ -1192,6 +1621,7 @@ export const tools = await mcpClient.getTools();
1192
1621
  error: stderr
1193
1622
  };
1194
1623
  } catch (error) {
1624
+ console.log(error);
1195
1625
  return {
1196
1626
  success: false,
1197
1627
  message: `Failed to create project: ${error instanceof Error ? error.message : String(error)}`
@@ -1367,9 +1797,17 @@ export const tools = await mcpClient.getTools();
1367
1797
  * Stop the Mastra server
1368
1798
  */
1369
1799
  static async stopMastraServer({ port = 4200, projectPath: _projectPath }) {
1800
+ if (typeof port !== "number" || !Number.isInteger(port) || port < 1 || port > 65535) {
1801
+ return {
1802
+ success: false,
1803
+ status: "error",
1804
+ error: `Invalid port value: ${String(port)}`
1805
+ };
1806
+ }
1370
1807
  try {
1371
- const { stdout } = await exec(`lsof -ti:${port} || echo "No process found"`);
1372
- if (!stdout.trim() || stdout.trim() === "No process found") {
1808
+ const { stdout } = await execFile("lsof", ["-ti", String(port)]);
1809
+ const effectiveStdout = stdout.trim() ? stdout : "No process found";
1810
+ if (!effectiveStdout || effectiveStdout === "No process found") {
1373
1811
  return {
1374
1812
  success: true,
1375
1813
  status: "stopped",
@@ -1385,8 +1823,9 @@ export const tools = await mcpClient.getTools();
1385
1823
  try {
1386
1824
  process.kill(pid, "SIGTERM");
1387
1825
  killedPids.push(pid);
1388
- } catch {
1826
+ } catch (e) {
1389
1827
  failedPids.push(pid);
1828
+ console.warn(`Failed to kill process ${pid}:`, e);
1390
1829
  }
1391
1830
  }
1392
1831
  if (killedPids.length === 0) {
@@ -1397,10 +1836,16 @@ export const tools = await mcpClient.getTools();
1397
1836
  error: `Could not kill PIDs: ${failedPids.join(", ")}`
1398
1837
  };
1399
1838
  }
1839
+ if (failedPids.length > 0) {
1840
+ console.warn(
1841
+ `Killed ${killedPids.length} processes but failed to kill ${failedPids.length} processes: ${failedPids.join(", ")}`
1842
+ );
1843
+ }
1400
1844
  await new Promise((resolve4) => setTimeout(resolve4, 2e3));
1401
1845
  try {
1402
- const { stdout: checkStdout } = await exec(`lsof -ti:${port} || echo "No process found"`);
1403
- if (checkStdout.trim() && checkStdout.trim() !== "No process found") {
1846
+ const { stdout: checkStdoutRaw } = await execFile("lsof", ["-ti", String(port)]);
1847
+ const checkStdout = checkStdoutRaw.trim() ? checkStdoutRaw : "No process found";
1848
+ if (checkStdout && checkStdout !== "No process found") {
1404
1849
  const remainingPids = checkStdout.trim().split("\n").filter((pid) => pid.trim());
1405
1850
  for (const pidStr of remainingPids) {
1406
1851
  const pid = parseInt(pidStr.trim());
@@ -1412,8 +1857,9 @@ export const tools = await mcpClient.getTools();
1412
1857
  }
1413
1858
  }
1414
1859
  await new Promise((resolve4) => setTimeout(resolve4, 1e3));
1415
- const { stdout: finalCheck } = await exec(`lsof -ti:${port} || echo "No process found"`);
1416
- if (finalCheck.trim() && finalCheck.trim() !== "No process found") {
1860
+ const { stdout: finalCheckRaw } = await execFile("lsof", ["-ti", String(port)]);
1861
+ const finalCheck = finalCheckRaw.trim() ? finalCheckRaw : "No process found";
1862
+ if (finalCheck && finalCheck !== "No process found") {
1417
1863
  return {
1418
1864
  success: false,
1419
1865
  status: "unknown",
@@ -1471,8 +1917,9 @@ export const tools = await mcpClient.getTools();
1471
1917
  }
1472
1918
  } catch {
1473
1919
  try {
1474
- const { stdout } = await exec(`lsof -ti:${port} || echo "No process found"`);
1475
- const hasProcess = stdout.trim() && stdout.trim() !== "No process found";
1920
+ const { stdout } = await execFile("lsof", ["-ti", String(port)]);
1921
+ const effectiveStdout = stdout.trim() ? stdout : "No process found";
1922
+ const hasProcess = effectiveStdout && effectiveStdout !== "No process found";
1476
1923
  return {
1477
1924
  success: Boolean(hasProcess),
1478
1925
  status: hasProcess ? "starting" : "stopped",
@@ -1489,13 +1936,99 @@ export const tools = await mcpClient.getTools();
1489
1936
  }
1490
1937
  }
1491
1938
  }
1939
+ // Cache for TypeScript program (lazily loaded)
1940
+ static tsProgram = null;
1941
+ static programProjectPath = null;
1492
1942
  /**
1493
- * Validate code using TypeScript, ESLint, and other tools
1943
+ * Validate code using hybrid approach: syntax -> types -> lint
1944
+ *
1945
+ * BEST PRACTICES FOR CODING AGENTS:
1946
+ *
1947
+ * ✅ RECOMMENDED (Fast & Accurate):
1948
+ * validateCode({
1949
+ * validationType: ['types', 'lint'],
1950
+ * files: ['src/workflows/my-workflow.ts', 'src/components/Button.tsx']
1951
+ * })
1952
+ *
1953
+ * Performance: ~150ms
1954
+ * - Syntax check (1ms) - catches 80% of issues instantly
1955
+ * - Semantic validation (100ms) - full type checking with dependencies
1956
+ * - ESLint (50ms) - style and best practices
1957
+ * - Only shows errors from YOUR files
1958
+ *
1959
+ * ❌ AVOID (Slow & Noisy):
1960
+ * validateCode({ validationType: ['types', 'lint'] }) // no files specified
1961
+ *
1962
+ * Performance: ~2000ms+
1963
+ * - Full project CLI validation
1964
+ * - Shows errors from all project files (confusing)
1965
+ * - Much slower for coding agents
1966
+ *
1967
+ * @param projectPath - Project root directory (defaults to cwd)
1968
+ * @param validationType - ['types', 'lint'] recommended for most use cases
1969
+ * @param files - ALWAYS provide this for best performance
1494
1970
  */
1495
1971
  static async validateCode({
1496
1972
  projectPath,
1497
1973
  validationType,
1498
1974
  files
1975
+ }) {
1976
+ const errors = [];
1977
+ const validationsPassed = [];
1978
+ const validationsFailed = [];
1979
+ const targetProjectPath = projectPath || process.cwd();
1980
+ if (!files || files.length === 0) {
1981
+ return this.validateCodeCLI({ projectPath, validationType });
1982
+ }
1983
+ for (const filePath of files) {
1984
+ const absolutePath = isAbsolute(filePath) ? filePath : resolve(targetProjectPath, filePath);
1985
+ try {
1986
+ const fileContent = await readFile(absolutePath, "utf-8");
1987
+ const fileResults = await this.validateSingleFileHybrid(
1988
+ absolutePath,
1989
+ fileContent,
1990
+ targetProjectPath,
1991
+ validationType
1992
+ );
1993
+ errors.push(...fileResults.errors);
1994
+ for (const type of validationType) {
1995
+ const hasErrors = fileResults.errors.some((e) => e.type === type && e.severity === "error");
1996
+ if (hasErrors) {
1997
+ if (!validationsFailed.includes(type)) validationsFailed.push(type);
1998
+ } else {
1999
+ if (!validationsPassed.includes(type)) validationsPassed.push(type);
2000
+ }
2001
+ }
2002
+ } catch (error) {
2003
+ errors.push({
2004
+ type: "typescript",
2005
+ severity: "error",
2006
+ message: `Failed to read file ${filePath}: ${error instanceof Error ? error.message : String(error)}`,
2007
+ file: filePath
2008
+ });
2009
+ validationsFailed.push("types");
2010
+ }
2011
+ }
2012
+ const totalErrors = errors.filter((e) => e.severity === "error").length;
2013
+ const totalWarnings = errors.filter((e) => e.severity === "warning").length;
2014
+ const isValid = totalErrors === 0;
2015
+ return {
2016
+ valid: isValid,
2017
+ errors,
2018
+ summary: {
2019
+ totalErrors,
2020
+ totalWarnings,
2021
+ validationsPassed,
2022
+ validationsFailed
2023
+ }
2024
+ };
2025
+ }
2026
+ /**
2027
+ * CLI-based validation for when no specific files are provided
2028
+ */
2029
+ static async validateCodeCLI({
2030
+ projectPath,
2031
+ validationType
1499
2032
  }) {
1500
2033
  const errors = [];
1501
2034
  const validationsPassed = [];
@@ -1503,9 +2036,8 @@ export const tools = await mcpClient.getTools();
1503
2036
  const execOptions = { cwd: projectPath };
1504
2037
  if (validationType.includes("types")) {
1505
2038
  try {
1506
- const filePattern = files?.length ? files.join(" ") : "";
1507
- const tscCommand = files?.length ? `npx tsc --noEmit ${filePattern}` : "npx tsc --noEmit";
1508
- await exec(tscCommand, execOptions);
2039
+ const args = ["tsc", "--noEmit"];
2040
+ await execFile("npx", args, execOptions);
1509
2041
  validationsPassed.push("types");
1510
2042
  } catch (error) {
1511
2043
  let tsOutput = "";
@@ -1526,9 +2058,8 @@ export const tools = await mcpClient.getTools();
1526
2058
  }
1527
2059
  if (validationType.includes("lint")) {
1528
2060
  try {
1529
- const filePattern = files?.length ? files.join(" ") : ".";
1530
- const eslintCommand = `npx eslint ${filePattern} --format json`;
1531
- const { stdout } = await exec(eslintCommand, execOptions);
2061
+ const eslintArgs = ["eslint", "--format", "json"];
2062
+ const { stdout } = await execFile("npx", eslintArgs, execOptions);
1532
2063
  if (stdout) {
1533
2064
  const eslintResults = JSON.parse(stdout);
1534
2065
  const eslintErrors = _AgentBuilderDefaults.parseESLintErrors(eslintResults);
@@ -1577,33 +2108,214 @@ export const tools = await mcpClient.getTools();
1577
2108
  };
1578
2109
  }
1579
2110
  /**
1580
- * Parse ESLint errors from JSON output
2111
+ * Hybrid validation for a single file
1581
2112
  */
1582
- static parseESLintErrors(eslintResults) {
2113
+ static async validateSingleFileHybrid(filePath, fileContent, projectPath, validationType) {
1583
2114
  const errors = [];
1584
- for (const result of eslintResults) {
1585
- for (const message of result.messages || []) {
1586
- if (message.message) {
2115
+ if (validationType.includes("types")) {
2116
+ const syntaxErrors = await this.validateSyntaxOnly(fileContent, filePath);
2117
+ errors.push(...syntaxErrors);
2118
+ if (syntaxErrors.length > 0) {
2119
+ return { errors };
2120
+ }
2121
+ const typeErrors = await this.validateTypesSemantic(filePath, projectPath);
2122
+ errors.push(...typeErrors);
2123
+ }
2124
+ if (validationType.includes("lint") && !errors.some((e) => e.severity === "error")) {
2125
+ const lintErrors = await this.validateESLintSingle(filePath, projectPath);
2126
+ errors.push(...lintErrors);
2127
+ }
2128
+ return { errors };
2129
+ }
2130
+ /**
2131
+ * Fast syntax-only validation using TypeScript parser
2132
+ */
2133
+ static async validateSyntaxOnly(fileContent, fileName) {
2134
+ const errors = [];
2135
+ try {
2136
+ const ts = await import('typescript');
2137
+ const sourceFile = ts.createSourceFile(fileName, fileContent, ts.ScriptTarget.Latest, true);
2138
+ const options = {
2139
+ allowJs: true,
2140
+ checkJs: false,
2141
+ noEmit: true
2142
+ };
2143
+ const host = {
2144
+ getSourceFile: (name) => name === fileName ? sourceFile : void 0,
2145
+ writeFile: () => {
2146
+ },
2147
+ getCurrentDirectory: () => "",
2148
+ getDirectories: () => [],
2149
+ fileExists: (name) => name === fileName,
2150
+ readFile: (name) => name === fileName ? fileContent : void 0,
2151
+ getCanonicalFileName: (name) => name,
2152
+ useCaseSensitiveFileNames: () => true,
2153
+ getNewLine: () => "\n",
2154
+ getDefaultLibFileName: () => "lib.d.ts"
2155
+ };
2156
+ const program = ts.createProgram([fileName], options, host);
2157
+ const diagnostics = program.getSyntacticDiagnostics(sourceFile);
2158
+ for (const diagnostic of diagnostics) {
2159
+ if (diagnostic.start !== void 0) {
2160
+ const position = sourceFile.getLineAndCharacterOfPosition(diagnostic.start);
1587
2161
  errors.push({
1588
- type: "eslint",
1589
- severity: message.severity === 1 ? "warning" : "error",
1590
- message: message.message,
1591
- file: result.filePath || void 0,
1592
- line: message.line || void 0,
1593
- column: message.column || void 0,
1594
- code: message.ruleId || void 0
2162
+ type: "typescript",
2163
+ severity: "error",
2164
+ message: ts.flattenDiagnosticMessageText(diagnostic.messageText, "\n"),
2165
+ file: fileName,
2166
+ line: position.line + 1,
2167
+ column: position.character + 1
1595
2168
  });
1596
2169
  }
1597
2170
  }
2171
+ } catch (error) {
2172
+ console.warn("TypeScript not available for syntax validation:", error);
2173
+ const lines = fileContent.split("\n");
2174
+ const commonErrors = [
2175
+ { pattern: /\bimport\s+.*\s+from\s+['""][^'"]*$/, message: "Unterminated import statement" },
2176
+ { pattern: /\{[^}]*$/, message: "Unclosed brace" },
2177
+ { pattern: /\([^)]*$/, message: "Unclosed parenthesis" },
2178
+ { pattern: /\[[^\]]*$/, message: "Unclosed bracket" }
2179
+ ];
2180
+ lines.forEach((line, index) => {
2181
+ commonErrors.forEach(({ pattern, message }) => {
2182
+ if (pattern.test(line)) {
2183
+ errors.push({
2184
+ type: "typescript",
2185
+ severity: "error",
2186
+ message,
2187
+ file: fileName,
2188
+ line: index + 1
2189
+ });
2190
+ }
2191
+ });
2192
+ });
1598
2193
  }
1599
2194
  return errors;
1600
2195
  }
1601
2196
  /**
1602
- * Make HTTP request to server or external API
2197
+ * TypeScript semantic validation using incremental program
1603
2198
  */
1604
- static async makeHttpRequest({
1605
- method,
1606
- url,
2199
+ static async validateTypesSemantic(filePath, projectPath) {
2200
+ const errors = [];
2201
+ try {
2202
+ const program = await this.getOrCreateTSProgram(projectPath);
2203
+ if (!program) {
2204
+ return errors;
2205
+ }
2206
+ const sourceFile = program.getSourceFile(filePath);
2207
+ if (!sourceFile) {
2208
+ return errors;
2209
+ }
2210
+ const diagnostics = [
2211
+ ...program.getSemanticDiagnostics(sourceFile),
2212
+ ...program.getSyntacticDiagnostics(sourceFile)
2213
+ ];
2214
+ const ts = await import('typescript');
2215
+ for (const diagnostic of diagnostics) {
2216
+ if (diagnostic.start !== void 0) {
2217
+ const position = sourceFile.getLineAndCharacterOfPosition(diagnostic.start);
2218
+ errors.push({
2219
+ type: "typescript",
2220
+ severity: diagnostic.category === ts.DiagnosticCategory.Warning ? "warning" : "error",
2221
+ message: ts.flattenDiagnosticMessageText(diagnostic.messageText, "\n"),
2222
+ file: filePath,
2223
+ line: position.line + 1,
2224
+ column: position.character + 1
2225
+ });
2226
+ }
2227
+ }
2228
+ } catch (error) {
2229
+ console.warn(`TypeScript semantic validation failed for ${filePath}:`, error);
2230
+ }
2231
+ return errors;
2232
+ }
2233
+ /**
2234
+ * ESLint validation for a single file
2235
+ */
2236
+ static async validateESLintSingle(filePath, projectPath) {
2237
+ const errors = [];
2238
+ try {
2239
+ const { stdout } = await execFile("npx", ["eslint", filePath, "--format", "json"], { cwd: projectPath });
2240
+ if (stdout) {
2241
+ const eslintResults = JSON.parse(stdout);
2242
+ const eslintErrors = this.parseESLintErrors(eslintResults);
2243
+ errors.push(...eslintErrors);
2244
+ }
2245
+ } catch (error) {
2246
+ const errorMessage = error instanceof Error ? error.message : String(error);
2247
+ if (errorMessage.includes('"filePath"') || errorMessage.includes("messages")) {
2248
+ try {
2249
+ const eslintResults = JSON.parse(errorMessage);
2250
+ const eslintErrors = this.parseESLintErrors(eslintResults);
2251
+ errors.push(...eslintErrors);
2252
+ } catch {
2253
+ }
2254
+ }
2255
+ }
2256
+ return errors;
2257
+ }
2258
+ /**
2259
+ * Get or create TypeScript program
2260
+ */
2261
+ static async getOrCreateTSProgram(projectPath) {
2262
+ if (this.tsProgram && this.programProjectPath === projectPath) {
2263
+ return this.tsProgram;
2264
+ }
2265
+ try {
2266
+ const ts = await import('typescript');
2267
+ const configPath = ts.findConfigFile(projectPath, ts.sys.fileExists, "tsconfig.json");
2268
+ if (!configPath) {
2269
+ return null;
2270
+ }
2271
+ const configFile = ts.readConfigFile(configPath, ts.sys.readFile);
2272
+ if (configFile.error) {
2273
+ return null;
2274
+ }
2275
+ const parsedConfig = ts.parseJsonConfigFileContent(configFile.config, ts.sys, projectPath);
2276
+ if (parsedConfig.errors.length > 0) {
2277
+ return null;
2278
+ }
2279
+ this.tsProgram = ts.createProgram({
2280
+ rootNames: parsedConfig.fileNames,
2281
+ options: parsedConfig.options
2282
+ });
2283
+ this.programProjectPath = projectPath;
2284
+ return this.tsProgram;
2285
+ } catch (error) {
2286
+ console.warn("Failed to create TypeScript program:", error);
2287
+ return null;
2288
+ }
2289
+ }
2290
+ // Note: Old filterTypeScriptErrors method removed in favor of hybrid validation approach
2291
+ /**
2292
+ * Parse ESLint errors from JSON output
2293
+ */
2294
+ static parseESLintErrors(eslintResults) {
2295
+ const errors = [];
2296
+ for (const result of eslintResults) {
2297
+ for (const message of result.messages || []) {
2298
+ if (message.message) {
2299
+ errors.push({
2300
+ type: "eslint",
2301
+ severity: message.severity === 1 ? "warning" : "error",
2302
+ message: message.message,
2303
+ file: result.filePath || void 0,
2304
+ line: message.line || void 0,
2305
+ column: message.column || void 0,
2306
+ code: message.ruleId || void 0
2307
+ });
2308
+ }
2309
+ }
2310
+ }
2311
+ return errors;
2312
+ }
2313
+ /**
2314
+ * Make HTTP request to server or external API
2315
+ */
2316
+ static async makeHttpRequest({
2317
+ method,
2318
+ url,
1607
2319
  baseUrl,
1608
2320
  headers = {},
1609
2321
  body,
@@ -1662,6 +2374,11 @@ export const tools = await mcpClient.getTools();
1662
2374
  if (!_AgentBuilderDefaults.taskStorage) {
1663
2375
  _AgentBuilderDefaults.taskStorage = /* @__PURE__ */ new Map();
1664
2376
  }
2377
+ const sessions = Array.from(_AgentBuilderDefaults.taskStorage.keys());
2378
+ if (sessions.length > 10) {
2379
+ const sessionsToRemove = sessions.slice(0, sessions.length - 10);
2380
+ sessionsToRemove.forEach((session) => _AgentBuilderDefaults.taskStorage.delete(session));
2381
+ }
1665
2382
  const sessionId = "current";
1666
2383
  const existingTasks = _AgentBuilderDefaults.taskStorage.get(sessionId) || [];
1667
2384
  try {
@@ -1758,7 +2475,35 @@ export const tools = await mcpClient.getTools();
1758
2475
  static async analyzeCode(context) {
1759
2476
  try {
1760
2477
  const { action, path, language, depth = 3 } = context;
1761
- const languagePattern = language ? `*.${language}` : "*";
2478
+ const ALLOWED_LANGUAGES = [
2479
+ "js",
2480
+ "ts",
2481
+ "jsx",
2482
+ "tsx",
2483
+ "py",
2484
+ "java",
2485
+ "go",
2486
+ "cpp",
2487
+ "c",
2488
+ "cs",
2489
+ "rb",
2490
+ "php",
2491
+ "rs",
2492
+ "kt",
2493
+ "swift",
2494
+ "m",
2495
+ "scala",
2496
+ "sh",
2497
+ "json",
2498
+ "yaml",
2499
+ "yml",
2500
+ "toml",
2501
+ "ini"
2502
+ ];
2503
+ let languagePattern = "*";
2504
+ if (language && ALLOWED_LANGUAGES.includes(language)) {
2505
+ languagePattern = `*.${language}`;
2506
+ }
1762
2507
  switch (action) {
1763
2508
  case "definitions":
1764
2509
  const definitionPatterns = [
@@ -1771,9 +2516,15 @@ export const tools = await mcpClient.getTools();
1771
2516
  const definitions = [];
1772
2517
  for (const pattern of definitionPatterns) {
1773
2518
  try {
1774
- const { stdout } = await exec(
1775
- `rg -n "${pattern}" "${path}" --type ${languagePattern} --max-depth ${depth}`
1776
- );
2519
+ const { stdout } = await execFile("rg", [
2520
+ "-n",
2521
+ pattern,
2522
+ path,
2523
+ "--type",
2524
+ languagePattern,
2525
+ "--max-depth",
2526
+ String(depth)
2527
+ ]);
1777
2528
  const matches = stdout.split("\n").filter((line) => line.trim());
1778
2529
  matches.forEach((match) => {
1779
2530
  const parts = match.split(":");
@@ -1811,7 +2562,7 @@ export const tools = await mcpClient.getTools();
1811
2562
  const dependencies = [];
1812
2563
  for (const pattern of depPatterns) {
1813
2564
  try {
1814
- const { stdout } = await exec(`rg -n "${pattern}" "${path}" --type ${languagePattern}`);
2565
+ const { stdout } = await execFile("rg", ["-n", pattern, path, "--type", languagePattern]);
1815
2566
  const matches = stdout.split("\n").filter((line) => line.trim());
1816
2567
  matches.forEach((match) => {
1817
2568
  const parts = match.split(":");
@@ -1838,10 +2589,11 @@ export const tools = await mcpClient.getTools();
1838
2589
  message: `Found ${dependencies.length} dependencies`
1839
2590
  };
1840
2591
  case "structure":
1841
- const { stdout: lsOutput } = await exec(`find "${path}" -type f -name "${languagePattern}" | head -1000`);
1842
- const files = lsOutput.split("\n").filter((line) => line.trim());
1843
- const { stdout: dirOutput } = await exec(`find "${path}" -type d | wc -l`);
1844
- const directories = parseInt(dirOutput.trim());
2592
+ const { stdout: lsOutput } = await execFile("find", [path, "-type", "f", "-name", languagePattern]);
2593
+ const allFiles = lsOutput.split("\n").filter((line) => line.trim());
2594
+ const files = allFiles.slice(0, 1e3);
2595
+ const { stdout: dirOutput } = await execFile("find", [path, "-type", "d"]);
2596
+ const directories = dirOutput.split("\n").filter((line) => line.trim()).length;
1845
2597
  const languages = {};
1846
2598
  files.forEach((file) => {
1847
2599
  const ext = file.split(".").pop();
@@ -1881,56 +2633,57 @@ export const tools = await mcpClient.getTools();
1881
2633
  * Perform multiple edits across files atomically
1882
2634
  */
1883
2635
  static async performMultiEdit(context) {
2636
+ const { operations, createBackup = false, projectPath = process.cwd() } = context;
1884
2637
  const results = [];
1885
2638
  try {
1886
- const { projectPath } = context;
1887
- for (const operation of context.operations) {
1888
- const resolvedPath = isAbsolute(operation.filePath) ? operation.filePath : resolve(projectPath || process.cwd(), operation.filePath);
1889
- const result = {
1890
- filePath: resolvedPath,
1891
- editsApplied: 0,
1892
- errors: [],
1893
- backup: void 0
1894
- };
2639
+ for (const operation of operations) {
2640
+ const filePath = isAbsolute(operation.filePath) ? operation.filePath : join(projectPath, operation.filePath);
2641
+ let editsApplied = 0;
2642
+ const errors = [];
2643
+ let backup;
1895
2644
  try {
1896
- const originalContent = await readFile(resolvedPath, "utf-8");
1897
- if (context.createBackup) {
1898
- const backupPath = `${resolvedPath}.backup.${Date.now()}`;
1899
- await writeFile(backupPath, originalContent);
1900
- result.backup = backupPath;
2645
+ if (createBackup) {
2646
+ const backupPath = `${filePath}.backup.${Date.now()}`;
2647
+ const originalContent = await readFile(filePath, "utf-8");
2648
+ await writeFile(backupPath, originalContent, "utf-8");
2649
+ backup = backupPath;
1901
2650
  }
1902
- let modifiedContent = originalContent;
2651
+ let content = await readFile(filePath, "utf-8");
1903
2652
  for (const edit of operation.edits) {
1904
- if (edit.replaceAll) {
1905
- const regex = new RegExp(edit.oldString.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "g");
1906
- const matches = modifiedContent.match(regex);
2653
+ const { oldString, newString, replaceAll = false } = edit;
2654
+ if (replaceAll) {
2655
+ const regex = new RegExp(oldString.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "g");
2656
+ const matches = content.match(regex);
1907
2657
  if (matches) {
1908
- modifiedContent = modifiedContent.replace(regex, edit.newString);
1909
- result.editsApplied += matches.length;
2658
+ content = content.replace(regex, newString);
2659
+ editsApplied += matches.length;
1910
2660
  }
1911
2661
  } else {
1912
- if (modifiedContent.includes(edit.oldString)) {
1913
- modifiedContent = modifiedContent.replace(edit.oldString, edit.newString);
1914
- result.editsApplied++;
2662
+ if (content.includes(oldString)) {
2663
+ content = content.replace(oldString, newString);
2664
+ editsApplied++;
1915
2665
  } else {
1916
- result.errors.push(`String not found: "${edit.oldString.substring(0, 50)}..."`);
2666
+ errors.push(`String not found: "${oldString.substring(0, 50)}${oldString.length > 50 ? "..." : ""}"`);
1917
2667
  }
1918
2668
  }
1919
2669
  }
1920
- if (result.editsApplied > 0) {
1921
- await writeFile(resolvedPath, modifiedContent);
1922
- }
2670
+ await writeFile(filePath, content, "utf-8");
1923
2671
  } catch (error) {
1924
- result.errors.push(error instanceof Error ? error.message : String(error));
2672
+ errors.push(`File operation error: ${error instanceof Error ? error.message : String(error)}`);
1925
2673
  }
1926
- results.push(result);
2674
+ results.push({
2675
+ filePath: operation.filePath,
2676
+ editsApplied,
2677
+ errors,
2678
+ backup
2679
+ });
1927
2680
  }
1928
2681
  const totalEdits = results.reduce((sum, r) => sum + r.editsApplied, 0);
1929
2682
  const totalErrors = results.reduce((sum, r) => sum + r.errors.length, 0);
1930
2683
  return {
1931
2684
  success: totalErrors === 0,
1932
2685
  results,
1933
- message: `Applied ${totalEdits} edits across ${results.length} files${totalErrors > 0 ? ` with ${totalErrors} errors` : ""}`
2686
+ message: `Applied ${totalEdits} edits across ${operations.length} files${totalErrors > 0 ? ` with ${totalErrors} errors` : ""}`
1934
2687
  };
1935
2688
  } catch (error) {
1936
2689
  return {
@@ -1940,6 +2693,109 @@ export const tools = await mcpClient.getTools();
1940
2693
  };
1941
2694
  }
1942
2695
  }
2696
+ /**
2697
+ * Replace specific line ranges in a file with new content
2698
+ */
2699
+ static async replaceLines(context) {
2700
+ const { filePath, startLine, endLine, newContent, createBackup = false, projectPath = process.cwd() } = context;
2701
+ try {
2702
+ const fullPath = isAbsolute(filePath) ? filePath : join(projectPath, filePath);
2703
+ const content = await readFile(fullPath, "utf-8");
2704
+ const lines = content.split("\n");
2705
+ if (startLine < 1 || endLine < 1) {
2706
+ return {
2707
+ success: false,
2708
+ message: `Line numbers must be 1 or greater. Got startLine: ${startLine}, endLine: ${endLine}`,
2709
+ error: "Invalid line range"
2710
+ };
2711
+ }
2712
+ if (startLine > lines.length || endLine > lines.length) {
2713
+ return {
2714
+ success: false,
2715
+ message: `Line range ${startLine}-${endLine} is out of bounds. File has ${lines.length} lines. Remember: lines are 1-indexed, so valid range is 1-${lines.length}.`,
2716
+ error: "Invalid line range"
2717
+ };
2718
+ }
2719
+ if (startLine > endLine) {
2720
+ return {
2721
+ success: false,
2722
+ message: `Start line (${startLine}) cannot be greater than end line (${endLine}).`,
2723
+ error: "Invalid line range"
2724
+ };
2725
+ }
2726
+ let backup;
2727
+ if (createBackup) {
2728
+ const backupPath = `${fullPath}.backup.${Date.now()}`;
2729
+ await writeFile(backupPath, content, "utf-8");
2730
+ backup = backupPath;
2731
+ }
2732
+ const beforeLines = lines.slice(0, startLine - 1);
2733
+ const afterLines = lines.slice(endLine);
2734
+ const newLines = newContent ? newContent.split("\n") : [];
2735
+ const updatedLines = [...beforeLines, ...newLines, ...afterLines];
2736
+ const updatedContent = updatedLines.join("\n");
2737
+ await writeFile(fullPath, updatedContent, "utf-8");
2738
+ const linesReplaced = endLine - startLine + 1;
2739
+ const newLineCount = newLines.length;
2740
+ return {
2741
+ success: true,
2742
+ message: `Successfully replaced ${linesReplaced} lines (${startLine}-${endLine}) with ${newLineCount} new lines in ${filePath}`,
2743
+ linesReplaced,
2744
+ backup
2745
+ };
2746
+ } catch (error) {
2747
+ return {
2748
+ success: false,
2749
+ message: `Failed to replace lines: ${error instanceof Error ? error.message : String(error)}`,
2750
+ error: error instanceof Error ? error.message : String(error)
2751
+ };
2752
+ }
2753
+ }
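The new `replaceLines` helper splices a 1-indexed, inclusive line range out of the file and drops the split `newContent` in its place. A minimal standalone sketch of that splice (the sample lines and range below are made up, not taken from the package):

```js
// Same slice arithmetic as replaceLines above, on hypothetical data.
const lines = ["a", "b", "c", "d", "e"];
const startLine = 2, endLine = 4;            // replace lines 2-4 (inclusive, 1-indexed)
const newLines = "B\nC".split("\n");
const updated = [
  ...lines.slice(0, startLine - 1),          // lines before the range
  ...newLines,                               // replacement content
  ...lines.slice(endLine),                   // lines after the range
].join("\n");
// updated === "a\nB\nC\ne" -> 3 lines replaced by 2 new lines
```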
2754
+ /**
2755
+ * Show file lines with line numbers for debugging
2756
+ */
2757
+ static async showFileLines(context) {
2758
+ const { filePath, startLine, endLine, context: contextLines = 2, projectPath = process.cwd() } = context;
2759
+ try {
2760
+ const fullPath = isAbsolute(filePath) ? filePath : join(projectPath, filePath);
2761
+ const content = await readFile(fullPath, "utf-8");
2762
+ const lines = content.split("\n");
2763
+ let targetStart = startLine;
2764
+ let targetEnd = endLine;
2765
+ if (!targetStart) {
2766
+ targetStart = 1;
2767
+ targetEnd = lines.length;
2768
+ } else if (!targetEnd) {
2769
+ targetEnd = targetStart;
2770
+ }
2771
+ const displayStart = Math.max(1, targetStart - contextLines);
2772
+ const displayEnd = Math.min(lines.length, targetEnd + contextLines);
2773
+ const result = [];
2774
+ for (let i = displayStart; i <= displayEnd; i++) {
2775
+ const lineIndex = i - 1;
2776
+ const isTarget = i >= targetStart && i <= targetEnd;
2777
+ result.push({
2778
+ lineNumber: i,
2779
+ content: lineIndex < lines.length ? lines[lineIndex] ?? "" : "",
2780
+ isTarget
2781
+ });
2782
+ }
2783
+ return {
2784
+ success: true,
2785
+ lines: result,
2786
+ totalLines: lines.length,
2787
+ message: `Showing lines ${displayStart}-${displayEnd} of ${lines.length} total lines in ${filePath}`
2788
+ };
2789
+ } catch (error) {
2790
+ return {
2791
+ success: false,
2792
+ lines: [],
2793
+ totalLines: 0,
2794
+ message: `Failed to read file: ${error instanceof Error ? error.message : String(error)}`,
2795
+ error: error instanceof Error ? error.message : String(error)
2796
+ };
2797
+ }
2798
+ }
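For `showFileLines`, the window that actually gets returned is the target range padded by `context` lines and clamped to the file, so the math reduces to two bounds (hypothetical numbers below):

```js
// Window math used by showFileLines above, with made-up values.
const totalLines = 40;
const targetStart = 10, targetEnd = 12, contextLines = 2;
const displayStart = Math.max(1, targetStart - contextLines);      // 8
const displayEnd = Math.min(totalLines, targetEnd + contextLines); // 14
// Lines 8-14 are returned; isTarget is true only for lines 10-12.
```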
1943
2799
  /**
1944
2800
  * Ask user for clarification
1945
2801
  */
@@ -1986,32 +2842,38 @@ export const tools = await mcpClient.getTools();
1986
2842
  /**
1987
2843
  * Perform intelligent search with context
1988
2844
  */
1989
- static async performSmartSearch(context) {
2845
+ static async performSmartSearch(context, projectPath) {
1990
2846
  try {
1991
2847
  const { query, type = "text", scope = {}, context: searchContext = {} } = context;
1992
2848
  const { paths = ["."], fileTypes = [], excludePaths = [], maxResults = 50 } = scope;
1993
2849
  const { beforeLines = 2, afterLines = 2 } = searchContext;
1994
- let rgCommand = "rg";
1995
- if (beforeLines > 0 || afterLines > 0) {
1996
- rgCommand += ` -A ${afterLines} -B ${beforeLines}`;
2850
+ const rgArgs = [];
2851
+ if (beforeLines > 0) {
2852
+ rgArgs.push("-B", beforeLines.toString());
2853
+ }
2854
+ if (afterLines > 0) {
2855
+ rgArgs.push("-A", afterLines.toString());
1997
2856
  }
1998
- rgCommand += " -n";
2857
+ rgArgs.push("-n");
1999
2858
  if (type === "regex") {
2000
- rgCommand += " -e";
2859
+ rgArgs.push("-e");
2001
2860
  } else if (type === "fuzzy") {
2002
- rgCommand += " --fixed-strings";
2861
+ rgArgs.push("--fixed-strings");
2003
2862
  }
2004
2863
  if (fileTypes.length > 0) {
2005
2864
  fileTypes.forEach((ft) => {
2006
- rgCommand += ` --type-add 'custom:*.${ft}' -t custom`;
2865
+ rgArgs.push("--type-add", `custom:*.${ft}`, "-t", "custom");
2007
2866
  });
2008
2867
  }
2009
2868
  excludePaths.forEach((path) => {
2010
- rgCommand += ` --glob '!${path}'`;
2869
+ rgArgs.push("--glob", `!${path}`);
2870
+ });
2871
+ rgArgs.push("-m", maxResults.toString());
2872
+ rgArgs.push(query);
2873
+ rgArgs.push(...paths);
2874
+ const { stdout } = await execFile("rg", rgArgs, {
2875
+ cwd: projectPath
2011
2876
  });
2012
- rgCommand += ` -m ${maxResults}`;
2013
- rgCommand += ` "${query}" ${paths.join(" ")}`;
2014
- const { stdout } = await exec(rgCommand);
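The search now builds an argument array for `execFile` instead of interpolating a shell string for `exec`, so the query and globs reach ripgrep as literal arguments rather than being parsed by a shell. A hedged sketch of the same invocation pattern (the query, context sizes, and path are placeholders):

```js
// Argument-array invocation of ripgrep, mirroring the rgArgs built above.
import { execFile as execFileCb } from "node:child_process";
import { promisify } from "node:util";

const execFile = promisify(execFileCb);
const query = "createStep(";                      // passed literally, never shell-parsed
const { stdout } = await execFile(
  "rg",
  ["-n", "-B", "2", "-A", "2", "--fixed-strings", "-m", "50", query, "src/"],
  { cwd: process.cwd() },
);
console.log(stdout);
// Note: rg exits with code 1 when nothing matches, which rejects the promise,
// so real callers likely want a try/catch around this.
```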
2015
2877
  const lines = stdout.split("\n").filter((line) => line.trim());
2016
2878
  const matches = [];
2017
2879
  let currentMatch = null;
@@ -2144,14 +3006,29 @@ export const tools = await mcpClient.getTools();
2144
3006
  includeMetadata = true,
2145
3007
  projectPath
2146
3008
  } = context;
3009
+ const gitignorePath = join(projectPath || process.cwd(), ".gitignore");
3010
+ let gitignoreFilter;
3011
+ try {
3012
+ const gitignoreContent = await readFile(gitignorePath, "utf-8");
3013
+ gitignoreFilter = ignore().add(gitignoreContent);
3014
+ } catch (err) {
3015
+ if (err.code !== "ENOENT") {
3016
+ console.error(`Error reading .gitignore file:`, err);
3017
+ }
3018
+ }
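Directory listing now consults the project's `.gitignore` through the `ignore()` helper (the usage matches the API of the npm `ignore` package). A small sketch of how that filter behaves, with made-up patterns and paths:

```js
// gitignore-style filtering as used above; patterns and paths are hypothetical.
import ignore from "ignore";

const gitignoreFilter = ignore().add("node_modules/\ndist/\n*.log");
gitignoreFilter.ignores("dist/index.js");        // true  -> entry is skipped
gitignoreFilter.ignores("src/mastra/index.ts");  // false -> entry is traversed
// ignores() expects paths relative to the .gitignore location, which is why
// the code above passes relative(projectPath, entryPath) rather than absolute paths.
```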
2147
3019
  const resolvedPath = isAbsolute(path) ? path : resolve(projectPath || process.cwd(), path);
2148
3020
  const items = [];
2149
3021
  async function processDirectory(dirPath, currentDepth = 0) {
3022
+ const relativeToProject = relative(projectPath || process.cwd(), dirPath);
3023
+ if (gitignoreFilter?.ignores(relativeToProject)) return;
2150
3024
  if (currentDepth > maxDepth) return;
2151
3025
  const entries = await readdir(dirPath);
2152
3026
  for (const entry of entries) {
3027
+ const entryPath = join(dirPath, entry);
3028
+ const relativeEntryPath = relative(projectPath || process.cwd(), entryPath);
3029
+ if (gitignoreFilter?.ignores(relativeEntryPath)) continue;
2153
3030
  if (!includeHidden && entry.startsWith(".")) continue;
2154
- const fullPath = join(dirPath, entry);
3031
+ const fullPath = entryPath;
2155
3032
  const relativePath = relative(resolvedPath, fullPath);
2156
3033
  if (pattern) {
2157
3034
  const regexPattern = pattern.replace(/\*/g, ".*").replace(/\?/g, ".");
@@ -2385,12 +3262,19 @@ var ToolSummaryProcessor = class extends MemoryProcessor {
2385
3262
  }
2386
3263
  }
2387
3264
  if (summaryTasks.length > 0) {
2388
- const summaryResults = await Promise.all(summaryTasks.map((task) => task.promise));
3265
+ const summaryResults = await Promise.allSettled(summaryTasks.map((task) => task.promise));
2389
3266
  summaryTasks.forEach((task, index) => {
2390
- const summaryResult = summaryResults[index];
2391
- const summaryText = summaryResult.text;
2392
- this.summaryCache.set(task.cacheKey, summaryText);
2393
- task.content.result = `Tool call summary: ${summaryText}`;
3267
+ const result = summaryResults[index];
3268
+ if (!result) return;
3269
+ if (result.status === "fulfilled") {
3270
+ const summaryResult = result.value;
3271
+ const summaryText = summaryResult.text;
3272
+ this.summaryCache.set(task.cacheKey, summaryText);
3273
+ task.content.result = `Tool call summary: ${summaryText}`;
3274
+ } else if (result.status === "rejected") {
3275
+ console.warn(`Failed to generate summary for tool call:`, result.reason);
3276
+ task.content.result = `Tool call summary: [Summary generation failed]`;
3277
+ }
2394
3278
  });
2395
3279
  }
2396
3280
  return messages;
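Switching the summary batch from `Promise.all` to `Promise.allSettled` means one failed summary no longer rejects the whole batch; each result is inspected individually. A minimal sketch of that pattern (the tasks here are invented):

```js
// allSettled pattern used by ToolSummaryProcessor above, with dummy tasks.
const tasks = [
  Promise.resolve({ text: "read 3 files" }),
  Promise.reject(new Error("model timeout")),
];
const settled = await Promise.allSettled(tasks);
for (const result of settled) {
  if (result.status === "fulfilled") {
    console.log("summary:", result.value.text);
  } else {
    console.warn("summary failed:", result.reason.message);
  }
}
// Output: one summary logged, one warning - the batch itself never throws.
```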
@@ -2403,19 +3287,30 @@ var WriteToDiskProcessor = class extends MemoryProcessor {
2403
3287
  this.prefix = prefix;
2404
3288
  }
2405
3289
  async process(messages) {
2406
- await writeFile(`${this.prefix}-${Date.now()}.json`, JSON.stringify(messages, null, 2));
3290
+ await writeFile(`${this.prefix}-${Date.now()}-${process.pid}.json`, JSON.stringify(messages, null, 2));
2407
3291
  return messages;
2408
3292
  }
2409
3293
  };
3294
+ var resolveModel2 = (runtimeContext) => {
3295
+ const modelFromContext = runtimeContext.get("model");
3296
+ if (modelFromContext) {
3297
+ if (isValidMastraLanguageModel2(modelFromContext)) {
3298
+ return modelFromContext;
3299
+ }
3300
+ throw new Error(
3301
+ 'Invalid model provided. Model must be a MastraLanguageModel instance (e.g., openai("gpt-4"), anthropic("claude-3-5-sonnet"), etc.)'
3302
+ );
3303
+ }
3304
+ return openai("gpt-4.1");
3305
+ };
3306
+ var isValidMastraLanguageModel2 = (model) => {
3307
+ return model && typeof model === "object" && typeof model.modelId === "string" && typeof model.generate === "function";
3308
+ };
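`resolveModel2` only requires something that answers `runtimeContext.get("model")`; when nothing is set it falls back to `openai("gpt-4.1")`. A hedged sketch of supplying an override (a plain Map stands in for Mastra's RuntimeContext here, and the provider call follows the package's own error message, e.g. `openai("gpt-4")`):

```js
// Overriding the default model seen by resolveModel2 above (sketch only).
import { openai } from "@ai-sdk/openai";

const runtimeContext = new Map();                 // stand-in for RuntimeContext
runtimeContext.set("model", openai("gpt-4o-mini"));
// resolveModel2(runtimeContext) would return this override - provided the value
// passes the isValidMastraLanguageModel2 shape check - instead of openai("gpt-4.1").
```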
2410
3309
  var cloneTemplateStep = createStep({
2411
3310
  id: "clone-template",
2412
3311
  description: "Clone the template repository to a temporary directory at the specified ref",
2413
- inputSchema: MergeInputSchema,
2414
- outputSchema: z.object({
2415
- templateDir: z.string(),
2416
- commitSha: z.string(),
2417
- slug: z.string()
2418
- }),
3312
+ inputSchema: AgentBuilderInputSchema,
3313
+ outputSchema: CloneTemplateResultSchema,
2419
3314
  execute: async ({ inputData }) => {
2420
3315
  const { repo, ref = "main", slug } = inputData;
2421
3316
  if (!repo) {
@@ -2424,45 +3319,37 @@ var cloneTemplateStep = createStep({
2424
3319
  const inferredSlug = slug || repo.split("/").pop()?.replace(/\.git$/, "") || "template";
2425
3320
  const tempDir = await mkdtemp(join(tmpdir(), "mastra-template-"));
2426
3321
  try {
2427
- const cloneCmd = `git clone "${repo}" "${tempDir}"`;
2428
- await exec(cloneCmd);
3322
+ await gitClone(repo, tempDir);
2429
3323
  if (ref !== "main" && ref !== "master") {
2430
- await exec(`git checkout "${ref}"`, { cwd: tempDir });
3324
+ await gitCheckoutRef(tempDir, ref);
2431
3325
  }
2432
- const { stdout: commitSha } = await exec("git rev-parse HEAD", { cwd: tempDir });
3326
+ const commitSha = await gitRevParse(tempDir, "HEAD");
2433
3327
  return {
2434
3328
  templateDir: tempDir,
2435
3329
  commitSha: commitSha.trim(),
2436
- slug: inferredSlug
3330
+ slug: inferredSlug,
3331
+ success: true
2437
3332
  };
2438
3333
  } catch (error) {
2439
3334
  try {
2440
3335
  await rm(tempDir, { recursive: true, force: true });
2441
3336
  } catch {
2442
3337
  }
2443
- throw new Error(`Failed to clone template: ${error instanceof Error ? error.message : String(error)}`);
3338
+ return {
3339
+ templateDir: "",
3340
+ commitSha: "",
3341
+ slug: slug || "unknown",
3342
+ success: false,
3343
+ error: `Failed to clone template: ${error instanceof Error ? error.message : String(error)}`
3344
+ };
2444
3345
  }
2445
3346
  }
2446
3347
  });
2447
3348
  var analyzePackageStep = createStep({
2448
3349
  id: "analyze-package",
2449
3350
  description: "Analyze the template package.json to extract dependency information",
2450
- inputSchema: z.object({
2451
- templateDir: z.string(),
2452
- commitSha: z.string(),
2453
- slug: z.string()
2454
- }),
2455
- outputSchema: z.object({
2456
- dependencies: z.record(z.string()).optional(),
2457
- devDependencies: z.record(z.string()).optional(),
2458
- peerDependencies: z.record(z.string()).optional(),
2459
- scripts: z.record(z.string()).optional(),
2460
- packageInfo: z.object({
2461
- name: z.string().optional(),
2462
- version: z.string().optional(),
2463
- description: z.string().optional()
2464
- })
2465
- }),
3351
+ inputSchema: CloneTemplateResultSchema,
3352
+ outputSchema: PackageAnalysisSchema,
2466
3353
  execute: async ({ inputData }) => {
2467
3354
  console.log("Analyzing template package.json...");
2468
3355
  const { templateDir } = inputData;
@@ -2476,11 +3363,10 @@ var analyzePackageStep = createStep({
2476
3363
  devDependencies: packageJson.devDependencies || {},
2477
3364
  peerDependencies: packageJson.peerDependencies || {},
2478
3365
  scripts: packageJson.scripts || {},
2479
- packageInfo: {
2480
- name: packageJson.name,
2481
- version: packageJson.version,
2482
- description: packageJson.description
2483
- }
3366
+ name: packageJson.name || "",
3367
+ version: packageJson.version || "",
3368
+ description: packageJson.description || "",
3369
+ success: true
2484
3370
  };
2485
3371
  } catch (error) {
2486
3372
  console.warn(`Failed to read template package.json: ${error instanceof Error ? error.message : String(error)}`);
@@ -2489,7 +3375,11 @@ var analyzePackageStep = createStep({
2489
3375
  devDependencies: {},
2490
3376
  peerDependencies: {},
2491
3377
  scripts: {},
2492
- packageInfo: {}
3378
+ name: "",
3379
+ version: "",
3380
+ description: "",
3381
+ success: true
3382
+ // This is a graceful fallback, not a failure
2493
3383
  };
2494
3384
  }
2495
3385
  }
@@ -2497,20 +3387,15 @@ var analyzePackageStep = createStep({
2497
3387
  var discoverUnitsStep = createStep({
2498
3388
  id: "discover-units",
2499
3389
  description: "Discover template units by analyzing the templates directory structure",
2500
- inputSchema: z.object({
2501
- templateDir: z.string(),
2502
- commitSha: z.string(),
2503
- slug: z.string()
2504
- }),
2505
- outputSchema: z.object({
2506
- units: z.array(TemplateUnitSchema)
2507
- }),
2508
- execute: async ({ inputData }) => {
3390
+ inputSchema: CloneTemplateResultSchema,
3391
+ outputSchema: DiscoveryResultSchema,
3392
+ execute: async ({ inputData, runtimeContext }) => {
2509
3393
  const { templateDir } = inputData;
2510
3394
  const tools = await AgentBuilderDefaults.DEFAULT_TOOLS(templateDir);
2511
- const agent = new Agent({
2512
- model: openai("gpt-4o-mini"),
2513
- instructions: `You are an expert at analyzing Mastra projects.
3395
+ try {
3396
+ const agent = new Agent({
3397
+ model: resolveModel2(runtimeContext),
3398
+ instructions: `You are an expert at analyzing Mastra projects.
2514
3399
 
2515
3400
  Your task is to scan the provided directory and identify all available units (agents, workflows, tools, MCP servers, networks).
2516
3401
 
@@ -2541,14 +3426,14 @@ IMPORTANT - Naming Consistency Rules:
2541
3426
  - use the relative path from the template root for the file (e.g., 'src/mastra/lib/util.ts' \u2192 file: 'src/mastra/lib/util.ts')
2542
3427
 
2543
3428
  Return the actual exported names of the units, as well as the file names.`,
2544
- name: "Mastra Project Discoverer",
2545
- tools: {
2546
- readFile: tools.readFile,
2547
- listDirectory: tools.listDirectory
2548
- }
2549
- });
2550
- const result = await agent.generate(
2551
- `Analyze the Mastra project directory structure at "${templateDir}".
3429
+ name: "Mastra Project Discoverer",
3430
+ tools: {
3431
+ readFile: tools.readFile,
3432
+ listDirectory: tools.listDirectory
3433
+ }
3434
+ });
3435
+ const result = await agent.generate(
3436
+ `Analyze the Mastra project directory structure at "${templateDir}".
2552
3437
 
2553
3438
  List directory contents using listDirectory tool, and then analyze each file with readFile tool.
2554
3439
  IMPORTANT:
@@ -2558,51 +3443,70 @@ Return the actual exported names of the units, as well as the file names.`,
2558
3443
  - If a directory doesn't exist or has no files, return an empty array
2559
3444
 
2560
3445
  Return the analysis in the exact format specified in the output schema.`,
2561
- {
2562
- experimental_output: z.object({
2563
- agents: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
2564
- workflows: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
2565
- tools: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
2566
- mcp: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
2567
- networks: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
2568
- other: z.array(z.object({ name: z.string(), file: z.string() })).optional()
2569
- }),
2570
- maxSteps: 100
3446
+ {
3447
+ experimental_output: z.object({
3448
+ agents: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
3449
+ workflows: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
3450
+ tools: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
3451
+ mcp: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
3452
+ networks: z.array(z.object({ name: z.string(), file: z.string() })).optional(),
3453
+ other: z.array(z.object({ name: z.string(), file: z.string() })).optional()
3454
+ }),
3455
+ maxSteps: 100
3456
+ }
3457
+ );
3458
+ const template = result.object ?? {};
3459
+ const units = [];
3460
+ template.agents?.forEach((agentId) => {
3461
+ units.push({ kind: "agent", id: agentId.name, file: agentId.file });
3462
+ });
3463
+ template.workflows?.forEach((workflowId) => {
3464
+ units.push({ kind: "workflow", id: workflowId.name, file: workflowId.file });
3465
+ });
3466
+ template.tools?.forEach((toolId) => {
3467
+ units.push({ kind: "tool", id: toolId.name, file: toolId.file });
3468
+ });
3469
+ template.mcp?.forEach((mcpId) => {
3470
+ units.push({ kind: "mcp-server", id: mcpId.name, file: mcpId.file });
3471
+ });
3472
+ template.networks?.forEach((networkId) => {
3473
+ units.push({ kind: "network", id: networkId.name, file: networkId.file });
3474
+ });
3475
+ template.other?.forEach((otherId) => {
3476
+ units.push({ kind: "other", id: otherId.name, file: otherId.file });
3477
+ });
3478
+ console.log("Discovered units:", JSON.stringify(units, null, 2));
3479
+ if (units.length === 0) {
3480
+ throw new Error(`No Mastra units (agents, workflows, tools) found in template.
3481
+ Possible causes:
3482
+ - Template may not follow standard Mastra structure
3483
+ - AI agent couldn't analyze template files (model/token limits)
3484
+ - Template is empty or in wrong branch
3485
+
3486
+ Debug steps:
3487
+ - Check template has files in src/mastra/ directories
3488
+ - Try a different branch
3489
+ - Check template repository structure manually`);
2571
3490
  }
2572
- );
2573
- const template = result.object ?? {};
2574
- const units = [];
2575
- template.agents?.forEach((agentId) => {
2576
- units.push({ kind: "agent", id: agentId.name, file: agentId.file });
2577
- });
2578
- template.workflows?.forEach((workflowId) => {
2579
- units.push({ kind: "workflow", id: workflowId.name, file: workflowId.file });
2580
- });
2581
- template.tools?.forEach((toolId) => {
2582
- units.push({ kind: "tool", id: toolId.name, file: toolId.file });
2583
- });
2584
- template.mcp?.forEach((mcpId) => {
2585
- units.push({ kind: "mcp-server", id: mcpId.name, file: mcpId.file });
2586
- });
2587
- template.networks?.forEach((networkId) => {
2588
- units.push({ kind: "network", id: networkId.name, file: networkId.file });
2589
- });
2590
- template.other?.forEach((otherId) => {
2591
- units.push({ kind: "other", id: otherId.name, file: otherId.file });
2592
- });
2593
- console.log("Discovered units:", JSON.stringify(units, null, 2));
2594
- return { units };
3491
+ return {
3492
+ units,
3493
+ success: true
3494
+ };
3495
+ } catch (error) {
3496
+ console.error("Failed to discover units:", error);
3497
+ return {
3498
+ units: [],
3499
+ success: false,
3500
+ error: `Failed to discover units: ${error instanceof Error ? error.message : String(error)}`
3501
+ };
3502
+ }
2595
3503
  }
2596
3504
  });
2597
3505
  var orderUnitsStep = createStep({
2598
3506
  id: "order-units",
2599
3507
  description: "Sort units in topological order based on kind weights",
2600
- inputSchema: z.object({
2601
- units: z.array(TemplateUnitSchema)
2602
- }),
2603
- outputSchema: z.object({
2604
- orderedUnits: z.array(TemplateUnitSchema)
2605
- }),
3508
+ inputSchema: DiscoveryResultSchema,
3509
+ outputSchema: OrderedUnitsSchema,
2606
3510
  execute: async ({ inputData }) => {
2607
3511
  const { units } = inputData;
2608
3512
  const orderedUnits = [...units].sort((a, b) => {
@@ -2610,103 +3514,102 @@ var orderUnitsStep = createStep({
2610
3514
  const bWeight = kindWeight(b.kind);
2611
3515
  return aWeight - bWeight;
2612
3516
  });
2613
- return { orderedUnits };
3517
+ return {
3518
+ orderedUnits,
3519
+ success: true
3520
+ };
3521
+ }
3522
+ });
3523
+ var prepareBranchStep = createStep({
3524
+ id: "prepare-branch",
3525
+ description: "Create or switch to integration branch before modifications",
3526
+ inputSchema: PrepareBranchInputSchema,
3527
+ outputSchema: PrepareBranchResultSchema,
3528
+ execute: async ({ inputData, runtimeContext }) => {
3529
+ const targetPath = resolveTargetPath(inputData, runtimeContext);
3530
+ try {
3531
+ const branchName = `feat/install-template-${inputData.slug}`;
3532
+ await gitCheckoutBranch(branchName, targetPath);
3533
+ return {
3534
+ branchName,
3535
+ success: true
3536
+ };
3537
+ } catch (error) {
3538
+ console.error("Failed to prepare branch:", error);
3539
+ return {
3540
+ branchName: `feat/install-template-${inputData.slug}`,
3541
+ // Return the intended name anyway
3542
+ success: false,
3543
+ error: `Failed to prepare branch: ${error instanceof Error ? error.message : String(error)}`
3544
+ };
3545
+ }
2614
3546
  }
2615
3547
  });
2616
3548
  var packageMergeStep = createStep({
2617
3549
  id: "package-merge",
2618
- description: "Merge template package.json dependencies into target project and install",
2619
- inputSchema: z.object({
2620
- commitSha: z.string(),
2621
- slug: z.string(),
2622
- targetPath: z.string().optional(),
2623
- packageInfo: z.object({
2624
- dependencies: z.record(z.string()).optional(),
2625
- devDependencies: z.record(z.string()).optional(),
2626
- peerDependencies: z.record(z.string()).optional(),
2627
- scripts: z.record(z.string()).optional(),
2628
- packageInfo: z.object({
2629
- name: z.string().optional(),
2630
- version: z.string().optional(),
2631
- description: z.string().optional()
2632
- })
2633
- })
2634
- }),
2635
- outputSchema: z.object({
2636
- success: z.boolean(),
2637
- applied: z.boolean(),
2638
- message: z.string(),
2639
- error: z.string().optional()
2640
- }),
3550
+ description: "Merge template package.json dependencies into target project",
3551
+ inputSchema: PackageMergeInputSchema,
3552
+ outputSchema: PackageMergeResultSchema,
2641
3553
  execute: async ({ inputData, runtimeContext }) => {
2642
3554
  console.log("Package merge step starting...");
2643
3555
  const { slug, packageInfo } = inputData;
2644
- const targetPath = inputData.targetPath || runtimeContext.get("targetPath") || process.cwd();
3556
+ const targetPath = resolveTargetPath(inputData, runtimeContext);
2645
3557
  try {
2646
- const allTools = await AgentBuilderDefaults.DEFAULT_TOOLS(targetPath);
2647
- const packageMergeAgent = new Agent({
2648
- name: "package-merger",
2649
- description: "Specialized agent for merging package.json dependencies",
2650
- instructions: `You are a package.json merge specialist. Your job is to:
2651
-
2652
- 1. **Read the target project's package.json** using readFile tool
2653
- 2. **Merge template dependencies** into the target package.json following these rules:
2654
- - For dependencies: Add ALL NEW ones with template versions, KEEP EXISTING versions for conflicts
2655
- - For devDependencies: Add ALL NEW ones with template versions, KEEP EXISTING versions for conflicts
2656
- - For peerDependencies: Add ALL NEW ones with template versions, KEEP EXISTING versions for conflicts
2657
- - For scripts: Add new scripts with "template:${slug}:" prefix, don't overwrite existing ones
2658
- - Maintain existing package.json structure and formatting
2659
- 3. **Write the updated package.json** using writeFile tool
2660
-
2661
- Template Dependencies to Merge:
2662
- - Dependencies: ${JSON.stringify(packageInfo.dependencies || {}, null, 2)}
2663
- - Dev Dependencies: ${JSON.stringify(packageInfo.devDependencies || {}, null, 2)}
2664
- - Peer Dependencies: ${JSON.stringify(packageInfo.peerDependencies || {}, null, 2)}
2665
- - Scripts: ${JSON.stringify(packageInfo.scripts || {}, null, 2)}
2666
-
2667
- CRITICAL MERGE RULES:
2668
- 1. For each dependency in template dependencies, if it does NOT exist in target, ADD it with template version
2669
- 2. For each dependency in template dependencies, if it ALREADY exists in target, KEEP target version
2670
- 3. You MUST add ALL template dependencies that don't conflict - do not skip any
2671
- 4. Be explicit about what you're adding vs keeping
2672
-
2673
- EXAMPLE:
2674
- Template has: {"@mastra/libsql": "latest", "@mastra/core": "latest", "zod": "^3.25.67"}
2675
- Target has: {"@mastra/core": "latest", "zod": "^3.25.0"}
2676
- Result should have: {"@mastra/core": "latest", "zod": "^3.25.0", "@mastra/libsql": "latest"}
2677
-
2678
- Be systematic and thorough. Always read the existing package.json first, then merge, then write.`,
2679
- model: openai("gpt-4o-mini"),
2680
- tools: {
2681
- readFile: allTools.readFile,
2682
- writeFile: allTools.writeFile,
2683
- listDirectory: allTools.listDirectory
3558
+ const targetPkgPath = join(targetPath, "package.json");
3559
+ let targetPkgRaw = "{}";
3560
+ try {
3561
+ targetPkgRaw = await readFile(targetPkgPath, "utf-8");
3562
+ } catch {
3563
+ console.warn(`No existing package.json at ${targetPkgPath}, creating a new one`);
3564
+ }
3565
+ let targetPkg;
3566
+ try {
3567
+ targetPkg = JSON.parse(targetPkgRaw || "{}");
3568
+ } catch (e) {
3569
+ throw new Error(
3570
+ `Failed to parse existing package.json at ${targetPkgPath}: ${e instanceof Error ? e.message : String(e)}`
3571
+ );
3572
+ }
3573
+ const ensureObj = (o) => o && typeof o === "object" ? o : {};
3574
+ targetPkg.dependencies = ensureObj(targetPkg.dependencies);
3575
+ targetPkg.devDependencies = ensureObj(targetPkg.devDependencies);
3576
+ targetPkg.peerDependencies = ensureObj(targetPkg.peerDependencies);
3577
+ targetPkg.scripts = ensureObj(targetPkg.scripts);
3578
+ const tplDeps = ensureObj(packageInfo.dependencies);
3579
+ const tplDevDeps = ensureObj(packageInfo.devDependencies);
3580
+ const tplPeerDeps = ensureObj(packageInfo.peerDependencies);
3581
+ const tplScripts = ensureObj(packageInfo.scripts);
3582
+ const existsAnywhere = (name) => name in targetPkg.dependencies || name in targetPkg.devDependencies || name in targetPkg.peerDependencies;
3583
+ for (const [name, ver] of Object.entries(tplDeps)) {
3584
+ if (!existsAnywhere(name)) {
3585
+ targetPkg.dependencies[name] = String(ver);
2684
3586
  }
2685
- });
2686
- console.log("Starting package merge agent...");
2687
- console.log("Template dependencies to merge:", JSON.stringify(packageInfo.dependencies, null, 2));
2688
- console.log("Template devDependencies to merge:", JSON.stringify(packageInfo.devDependencies, null, 2));
2689
- const result = await packageMergeAgent.stream(
2690
- `Please merge the template dependencies into the target project's package.json at ${targetPath}/package.json.`,
2691
- { experimental_output: z.object({ success: z.boolean() }) }
2692
- );
2693
- let buffer = [];
2694
- for await (const chunk of result.fullStream) {
2695
- if (chunk.type === "text-delta") {
2696
- buffer.push(chunk.textDelta);
2697
- if (buffer.length > 20) {
2698
- console.log(buffer.join(""));
2699
- buffer = [];
2700
- }
3587
+ }
3588
+ for (const [name, ver] of Object.entries(tplDevDeps)) {
3589
+ if (!existsAnywhere(name)) {
3590
+ targetPkg.devDependencies[name] = String(ver);
3591
+ }
3592
+ }
3593
+ for (const [name, ver] of Object.entries(tplPeerDeps)) {
3594
+ if (!(name in targetPkg.peerDependencies)) {
3595
+ targetPkg.peerDependencies[name] = String(ver);
2701
3596
  }
2702
3597
  }
2703
- if (buffer.length > 0) {
2704
- console.log(buffer.join(""));
3598
+ const prefix = `template:${slug}:`;
3599
+ for (const [name, cmd] of Object.entries(tplScripts)) {
3600
+ const newKey = `${prefix}${name}`;
3601
+ if (!(newKey in targetPkg.scripts)) {
3602
+ targetPkg.scripts[newKey] = String(cmd);
3603
+ }
2705
3604
  }
3605
+ await writeFile(targetPkgPath, JSON.stringify(targetPkg, null, 2), "utf-8");
3606
+ await gitAddAndCommit(targetPath, `feat(template): merge deps for ${slug}`, [targetPkgPath], {
3607
+ skipIfNoStaged: true
3608
+ });
2706
3609
  return {
2707
3610
  success: true,
2708
3611
  applied: true,
2709
- message: `Successfully merged template dependencies and installed packages for ${slug}`
3612
+ message: `Successfully merged template dependencies for ${slug}`
2710
3613
  };
2711
3614
  } catch (error) {
2712
3615
  console.error("Package merge failed:", error);
@@ -2719,32 +3622,30 @@ Be systematic and thorough. Always read the existing package.json first, then me
2719
3622
  }
2720
3623
  }
2721
3624
  });
2722
- var flatInstallStep = createStep({
2723
- id: "flat-install",
2724
- description: "Run a flat install command without specifying packages",
2725
- inputSchema: z.object({
2726
- targetPath: z.string().describe("Path to the project to install packages in")
2727
- }),
2728
- outputSchema: z.object({
2729
- success: z.boolean(),
2730
- message: z.string(),
2731
- details: z.string().optional()
2732
- }),
3625
+ var installStep = createStep({
3626
+ id: "install",
3627
+ description: "Install packages based on merged package.json",
3628
+ inputSchema: InstallInputSchema,
3629
+ outputSchema: InstallResultSchema,
2733
3630
  execute: async ({ inputData, runtimeContext }) => {
2734
- console.log("Running flat install...");
2735
- const targetPath = inputData.targetPath || runtimeContext.get("targetPath") || process.cwd();
3631
+ console.log("Running install step...");
3632
+ const targetPath = resolveTargetPath(inputData, runtimeContext);
2736
3633
  try {
2737
3634
  await spawnSWPM(targetPath, "install", []);
3635
+ const lock = ["pnpm-lock.yaml", "package-lock.json", "yarn.lock"].map((f) => join(targetPath, f)).find((f) => existsSync(f));
3636
+ if (lock) {
3637
+ await gitAddAndCommit(targetPath, `chore(template): commit lockfile after install`, [lock], {
3638
+ skipIfNoStaged: true
3639
+ });
3640
+ }
2738
3641
  return {
2739
- success: true,
2740
- message: "Successfully ran flat install command",
2741
- details: "Installed all dependencies from package.json"
3642
+ success: true
2742
3643
  };
2743
3644
  } catch (error) {
2744
- console.error("Flat install failed:", error);
3645
+ console.error("Install failed:", error);
2745
3646
  return {
2746
3647
  success: false,
2747
- message: `Flat install failed: ${error instanceof Error ? error.message : String(error)}`
3648
+ error: error instanceof Error ? error.message : String(error)
2748
3649
  };
2749
3650
  }
2750
3651
  }
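After `spawnSWPM(targetPath, "install", [])`, the step looks for whichever lockfile the package manager produced and commits just that file. The detection itself is a one-liner (the path below is hypothetical):

```js
// Lockfile detection as in installStep above; targetPath is a placeholder.
import { existsSync } from "node:fs";
import { join } from "node:path";

const targetPath = "/path/to/project";
const lock = ["pnpm-lock.yaml", "package-lock.json", "yarn.lock"]
  .map((f) => join(targetPath, f))
  .find((f) => existsSync(f));
// `lock` is the first lockfile that exists, or undefined when none was written.
```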
@@ -2752,49 +3653,12 @@ var flatInstallStep = createStep({
2752
3653
  var programmaticFileCopyStep = createStep({
2753
3654
  id: "programmatic-file-copy",
2754
3655
  description: "Programmatically copy template files to target project based on ordered units",
2755
- inputSchema: z.object({
2756
- orderedUnits: z.array(
2757
- z.object({
2758
- kind: z.string(),
2759
- id: z.string(),
2760
- file: z.string()
2761
- })
2762
- ),
2763
- templateDir: z.string(),
2764
- commitSha: z.string(),
2765
- slug: z.string(),
2766
- targetPath: z.string().optional()
2767
- }),
2768
- outputSchema: z.object({
2769
- success: z.boolean(),
2770
- copiedFiles: z.array(
2771
- z.object({
2772
- source: z.string(),
2773
- destination: z.string(),
2774
- unit: z.object({
2775
- kind: z.string(),
2776
- id: z.string()
2777
- })
2778
- })
2779
- ),
2780
- conflicts: z.array(
2781
- z.object({
2782
- unit: z.object({
2783
- kind: z.string(),
2784
- id: z.string()
2785
- }),
2786
- issue: z.string(),
2787
- sourceFile: z.string(),
2788
- targetFile: z.string()
2789
- })
2790
- ),
2791
- message: z.string(),
2792
- error: z.string().optional()
2793
- }),
3656
+ inputSchema: FileCopyInputSchema,
3657
+ outputSchema: FileCopyResultSchema,
2794
3658
  execute: async ({ inputData, runtimeContext }) => {
2795
3659
  console.log("Programmatic file copy step starting...");
2796
3660
  const { orderedUnits, templateDir, commitSha, slug } = inputData;
2797
- const targetPath = inputData.targetPath || runtimeContext.get("targetPath") || process.cwd();
3661
+ const targetPath = resolveTargetPath(inputData, runtimeContext);
2798
3662
  try {
2799
3663
  const copiedFiles = [];
2800
3664
  const conflicts = [];
@@ -2873,7 +3737,7 @@ var programmaticFileCopyStep = createStep({
2873
3737
  const convertedFileName = namingConvention !== "unknown" ? convertNaming(baseId + fileExtension, namingConvention) : baseId + fileExtension;
2874
3738
  const targetFile = resolve(targetPath, targetDir, convertedFileName);
2875
3739
  if (existsSync(targetFile)) {
2876
- const strategy = determineConflictStrategy();
3740
+ const strategy = determineConflictStrategy(unit, targetFile);
2877
3741
  console.log(`File exists: ${convertedFileName}, using strategy: ${strategy}`);
2878
3742
  switch (strategy) {
2879
3743
  case "skip":
@@ -2953,18 +3817,87 @@ var programmaticFileCopyStep = createStep({
2953
3817
  });
2954
3818
  }
2955
3819
  }
2956
- if (copiedFiles.length > 0) {
2957
- try {
2958
- const fileList = copiedFiles.map((f) => f.destination);
2959
- const gitCommand = ["git", "add", ...fileList];
2960
- await exec(gitCommand.join(" "), { cwd: targetPath });
2961
- await exec(
2962
- `git commit -m "feat(template): copy ${copiedFiles.length} files from ${slug}@${commitSha.substring(0, 7)}"`,
2963
- { cwd: targetPath }
2964
- );
2965
- console.log(`\u2713 Committed ${copiedFiles.length} copied files`);
2966
- } catch (commitError) {
2967
- console.warn("Failed to commit copied files:", commitError);
3820
+ try {
3821
+ const targetTsconfig = resolve(targetPath, "tsconfig.json");
3822
+ if (!existsSync(targetTsconfig)) {
3823
+ const templateTsconfig = resolve(templateDir, "tsconfig.json");
3824
+ if (existsSync(templateTsconfig)) {
3825
+ await copyFile(templateTsconfig, targetTsconfig);
3826
+ copiedFiles.push({
3827
+ source: templateTsconfig,
3828
+ destination: targetTsconfig,
3829
+ unit: { kind: "other", id: "tsconfig.json" }
3830
+ });
3831
+ console.log("\u2713 Copied tsconfig.json from template to target");
3832
+ } else {
3833
+ const minimalTsconfig = {
3834
+ compilerOptions: {
3835
+ target: "ES2020",
3836
+ module: "NodeNext",
3837
+ moduleResolution: "NodeNext",
3838
+ strict: false,
3839
+ esModuleInterop: true,
3840
+ skipLibCheck: true,
3841
+ resolveJsonModule: true,
3842
+ outDir: "dist"
3843
+ },
3844
+ include: ["**/*.ts", "**/*.tsx", "**/*.mts", "**/*.cts"],
3845
+ exclude: ["node_modules", "dist", "build", ".next", ".output", ".turbo"]
3846
+ };
3847
+ await writeFile(targetTsconfig, JSON.stringify(minimalTsconfig, null, 2), "utf-8");
3848
+ copiedFiles.push({
3849
+ source: "[generated tsconfig.json]",
3850
+ destination: targetTsconfig,
3851
+ unit: { kind: "other", id: "tsconfig.json" }
3852
+ });
3853
+ console.log("\u2713 Generated minimal tsconfig.json in target");
3854
+ }
3855
+ }
3856
+ } catch (e) {
3857
+ conflicts.push({
3858
+ unit: { kind: "other", id: "tsconfig.json" },
3859
+ issue: `Failed to ensure tsconfig.json: ${e instanceof Error ? e.message : String(e)}`,
3860
+ sourceFile: "tsconfig.json",
3861
+ targetFile: "tsconfig.json"
3862
+ });
3863
+ }
3864
+ try {
3865
+ const targetMastraIndex = resolve(targetPath, "src/mastra/index.ts");
3866
+ if (!existsSync(targetMastraIndex)) {
3867
+ const templateMastraIndex = resolve(templateDir, "src/mastra/index.ts");
3868
+ if (existsSync(templateMastraIndex)) {
3869
+ if (!existsSync(dirname(targetMastraIndex))) {
3870
+ await mkdir(dirname(targetMastraIndex), { recursive: true });
3871
+ }
3872
+ await copyFile(templateMastraIndex, targetMastraIndex);
3873
+ copiedFiles.push({
3874
+ source: templateMastraIndex,
3875
+ destination: targetMastraIndex,
3876
+ unit: { kind: "other", id: "mastra-index" }
3877
+ });
3878
+ console.log("\u2713 Copied src/mastra/index.ts from template to target");
3879
+ }
3880
+ }
3881
+ } catch (e) {
3882
+ conflicts.push({
3883
+ unit: { kind: "other", id: "mastra-index" },
3884
+ issue: `Failed to ensure Mastra index file: ${e instanceof Error ? e.message : String(e)}`,
3885
+ sourceFile: "src/mastra/index.ts",
3886
+ targetFile: "src/mastra/index.ts"
3887
+ });
3888
+ }
3889
+ if (copiedFiles.length > 0) {
3890
+ try {
3891
+ const fileList = copiedFiles.map((f) => f.destination);
3892
+ await gitAddAndCommit(
3893
+ targetPath,
3894
+ `feat(template): copy ${copiedFiles.length} files from ${slug}@${commitSha.substring(0, 7)}`,
3895
+ fileList,
3896
+ { skipIfNoStaged: true }
3897
+ );
3898
+ console.log(`\u2713 Committed ${copiedFiles.length} copied files`);
3899
+ } catch (commitError) {
3900
+ console.warn("Failed to commit copied files:", commitError);
2968
3901
  }
2969
3902
  }
2970
3903
  const message = `Programmatic file copy completed. Copied ${copiedFiles.length} files, ${conflicts.length} conflicts detected.`;
@@ -2977,83 +3910,26 @@ var programmaticFileCopyStep = createStep({
2977
3910
  };
2978
3911
  } catch (error) {
2979
3912
  console.error("Programmatic file copy failed:", error);
2980
- throw new Error(`Programmatic file copy failed: ${error instanceof Error ? error.message : String(error)}`);
3913
+ return {
3914
+ success: false,
3915
+ copiedFiles: [],
3916
+ conflicts: [],
3917
+ message: `Programmatic file copy failed: ${error instanceof Error ? error.message : String(error)}`,
3918
+ error: error instanceof Error ? error.message : String(error)
3919
+ };
2981
3920
  }
2982
3921
  }
2983
3922
  });
2984
3923
  var intelligentMergeStep = createStep({
2985
3924
  id: "intelligent-merge",
2986
3925
  description: "Use AgentBuilder to intelligently merge template files",
2987
- inputSchema: z.object({
2988
- conflicts: z.array(
2989
- z.object({
2990
- unit: z.object({
2991
- kind: z.string(),
2992
- id: z.string()
2993
- }),
2994
- issue: z.string(),
2995
- sourceFile: z.string(),
2996
- targetFile: z.string()
2997
- })
2998
- ),
2999
- copiedFiles: z.array(
3000
- z.object({
3001
- source: z.string(),
3002
- destination: z.string(),
3003
- unit: z.object({
3004
- kind: z.string(),
3005
- id: z.string()
3006
- })
3007
- })
3008
- ),
3009
- templateDir: z.string(),
3010
- commitSha: z.string(),
3011
- slug: z.string(),
3012
- targetPath: z.string().optional()
3013
- }),
3014
- outputSchema: z.object({
3015
- success: z.boolean(),
3016
- applied: z.boolean(),
3017
- message: z.string(),
3018
- conflictsResolved: z.array(
3019
- z.object({
3020
- unit: z.object({
3021
- kind: z.string(),
3022
- id: z.string()
3023
- }),
3024
- issue: z.string(),
3025
- resolution: z.string()
3026
- })
3027
- ),
3028
- error: z.string().optional(),
3029
- branchName: z.string().optional()
3030
- }),
3926
+ inputSchema: IntelligentMergeInputSchema,
3927
+ outputSchema: IntelligentMergeResultSchema,
3031
3928
  execute: async ({ inputData, runtimeContext }) => {
3032
3929
  console.log("Intelligent merge step starting...");
3033
- const { conflicts, copiedFiles, commitSha, slug, templateDir } = inputData;
3034
- const targetPath = inputData.targetPath || runtimeContext.get("targetPath") || process.cwd();
3035
- const baseBranchName = `feat/install-template-${slug}`;
3930
+ const { conflicts, copiedFiles, commitSha, slug, templateDir, branchName } = inputData;
3931
+ const targetPath = resolveTargetPath(inputData, runtimeContext);
3036
3932
  try {
3037
- let branchName = baseBranchName;
3038
- try {
3039
- await exec(`git checkout -b "${branchName}"`, { cwd: targetPath });
3040
- console.log(`Created new branch: ${branchName}`);
3041
- } catch (error) {
3042
- const errorStr = error instanceof Error ? error.message : String(error);
3043
- if (errorStr.includes("already exists")) {
3044
- try {
3045
- await exec(`git checkout "${branchName}"`, { cwd: targetPath });
3046
- console.log(`Switched to existing branch: ${branchName}`);
3047
- } catch {
3048
- const timestamp = Date.now().toString().slice(-6);
3049
- branchName = `${baseBranchName}-${timestamp}`;
3050
- await exec(`git checkout -b "${branchName}"`, { cwd: targetPath });
3051
- console.log(`Created unique branch: ${branchName}`);
3052
- }
3053
- } else {
3054
- throw error;
3055
- }
3056
- }
3057
3933
  const copyFileTool = createTool({
3058
3934
  id: "copy-file",
3059
3935
  description: "Copy a file from template to target project (use only for edge cases - most files are already copied programmatically).",
@@ -3091,7 +3967,7 @@ var intelligentMergeStep = createStep({
3091
3967
  const agentBuilder = new AgentBuilder({
3092
3968
  projectPath: targetPath,
3093
3969
  mode: "template",
3094
- model: openai("gpt-4o-mini"),
3970
+ model: resolveModel2(runtimeContext),
3095
3971
  instructions: `
3096
3972
  You are an expert at integrating Mastra template components into existing projects.
3097
3973
 
@@ -3106,49 +3982,52 @@ CONFLICTS TO RESOLVE:
3106
3982
  ${JSON.stringify(conflicts, null, 2)}
3107
3983
 
3108
3984
  CRITICAL INSTRUCTIONS:
3109
- 1. **When committing changes**: NEVER add dependency/build directories. Use specific file paths with 'git add'
3110
- 2. **Package management**: NO need to install packages (already handled by package merge step)
3111
- 3. **Validation**: When validation fails due to import issues, check existing files and imports for correct naming conventions
3112
- 4. **Variable vs file names**: A variable name might differ from file name (e.g., filename: ./downloaderTool.ts, export const fetcherTool(...))
3113
- 5. **File copying**: Most files are already copied programmatically. Only use copyFile tool for edge cases where additional files are needed
3985
+ 1. **Package management**: NO need to install packages (already handled by package merge step)
3986
+ 2. **File copying**: Most files are already copied programmatically. Only use copyFile tool for edge cases where additional files are needed for conflict resolution
3114
3987
 
3115
3988
  KEY RESPONSIBILITIES:
3116
3989
  1. Resolve any conflicts from the programmatic copy step
3117
3990
  2. Register components in existing Mastra index file (agents, workflows, networks, mcp-servers)
3118
3991
  3. DO NOT register tools in existing Mastra index file - tools should remain standalone
3119
- 4. Fix import path issues in copied files
3120
- 5. Ensure TypeScript imports and exports are correct
3121
- 6. Validate integration works properly
3122
- 7. Copy additional files ONLY if needed for conflict resolution or missing dependencies
3992
+ 4. Copy additional files ONLY if needed for conflict resolution
3993
+
3994
+ MASTRA INDEX FILE HANDLING (src/mastra/index.ts):
3995
+ 1. **Verify the file exists**
3996
+ - Call readFile
3997
+ - If it fails with ENOENT (or listDirectory shows it missing) -> copyFile the template version to src/mastra/index.ts, then confirm it now exists
3998
+ - Always verify after copying that the file exists and is accessible
3999
+
4000
+ 2. **Edit the file**
4001
+ - Always work with the full file content
4002
+ - Generate the complete, correct source (imports, anchors, registrations, formatting)
4003
+ - Keep existing registrations intact and maintain file structure
4004
+ - Ensure proper spacing and organization of new additions
3123
4005
 
3124
- MASTRA-SPECIFIC INTEGRATION:
4006
+ 3. **Handle anchors and structure**
4007
+ - When generating new content, ensure you do not duplicate existing imports or object entries
4008
+ - If required anchors (e.g., agents: {}) are missing, add them while generating the new content
4009
+ - Add missing anchors just before the closing brace of the Mastra config
4010
+ - Do not restructure or reorder existing anchors and registrations
4011
+
4012
+ CRITICAL: ALWAYS use writeFile to update the mastra/index.ts file when needed to register new components.
4013
+
4014
+ MASTRA-SPECIFIC REGISTRATION:
3125
4015
  - Agents: Register in existing Mastra index file
3126
4016
  - Workflows: Register in existing Mastra index file
3127
4017
  - Networks: Register in existing Mastra index file
3128
4018
  - MCP servers: Register in existing Mastra index file
3129
4019
  - Tools: Copy to ${AgentBuilderDefaults.DEFAULT_FOLDER_STRUCTURE.tool} but DO NOT register in existing Mastra index file
3130
-
3131
- EDGE CASE FILE COPYING:
3132
- - IF a file for a resource does not exist in the target project AND was not programmatically copied, you can use copyFile tool
3133
- - When taking files from template, ensure you get the right file name and path
3134
- - Only copy files that are actually needed for the integration to work
3135
-
3136
- NAMING CONVENTION GUIDANCE:
3137
- When fixing imports or understanding naming patterns, use these examples:
3138
-
3139
- **Import Path Patterns:**
3140
- - camelCase files: import { myAgent } from './myAgent'
3141
- - snake_case files: import { myAgent } from './my_agent'
3142
- - kebab-case files: import { myAgent } from './my-agent'
3143
- - PascalCase files: import { MyAgent } from './MyAgent'
3144
-
3145
- **Naming Detection Examples:**
3146
- - Files like "weatherAgent.ts", "chatAgent.ts" \u2192 use camelCase
3147
- - Files like "weather_agent.ts", "chat_agent.ts" \u2192 use snake_case
3148
- - Files like "weather-agent.ts", "chat-agent.ts" \u2192 use kebab-case
3149
- - Files like "WeatherAgent.ts", "ChatAgent.ts" \u2192 use PascalCase
3150
-
3151
- **Key Rule:** Keep variable/export names unchanged - only adapt file names and import paths
4020
+ - If an anchor (e.g., "agents: {") is not found, avoid complex restructuring; instead, insert the missing anchor on a new line (e.g., add "agents: {" just before the closing brace of the Mastra config) and then proceed with the other registrations.
4021
+
4022
+ CONFLICT RESOLUTION AND FILE COPYING:
4023
+ - Only copy files if needed to resolve specific conflicts
4024
+ - When copying files from template:
4025
+ - Ensure you get the right file name and path
4026
+ - Verify the destination directory exists
4027
+ - Maintain the same relative path structure
4028
+ - Only copy files that are actually needed
4029
+ - Preserve existing functionality when resolving conflicts
4030
+ - Focus on registration and conflict resolution, validation will happen in a later step
3152
4031
 
3153
4032
  Template information:
3154
4033
  - Slug: ${slug}
@@ -3169,15 +4048,23 @@ Template information:
3169
4048
  notes: `Unit: ${conflict.unit.kind}:${conflict.unit.id}, Issue: ${conflict.issue}, Source: ${conflict.sourceFile}, Target: ${conflict.targetFile}`
3170
4049
  });
3171
4050
  });
3172
- const nonToolFiles = copiedFiles.filter((f) => f.unit.kind !== "tool");
3173
- if (nonToolFiles.length > 0) {
4051
+ const registrableKinds = /* @__PURE__ */ new Set(["agent", "workflow", "network", "mcp-server"]);
4052
+ const registrableFiles = copiedFiles.filter((f) => registrableKinds.has(f.unit.kind));
4053
+ const targetMastraIndex = resolve(targetPath, "src/mastra/index.ts");
4054
+ const mastraIndexExists = existsSync(targetMastraIndex);
4055
+ console.log(`Mastra index exists: ${mastraIndexExists} at ${targetMastraIndex}`);
4056
+ console.log(
4057
+ "Registrable components:",
4058
+ registrableFiles.map((f) => `${f.unit.kind}:${f.unit.id}`)
4059
+ );
4060
+ if (registrableFiles.length > 0) {
3174
4061
  tasks.push({
3175
4062
  id: "register-components",
3176
- content: `Register ${nonToolFiles.length} components in existing Mastra index file (src/mastra/index.ts)`,
4063
+ content: `Register ${registrableFiles.length} components in existing Mastra index file (src/mastra/index.ts)`,
3177
4064
  status: "pending",
3178
4065
  priority: "medium",
3179
4066
  dependencies: conflicts.length > 0 ? conflicts.map((c) => `conflict-${c.unit.kind}-${c.unit.id}`) : void 0,
3180
- notes: `Components to register: ${nonToolFiles.map((f) => `${f.unit.kind}:${f.unit.id}`).join(", ")}`
4067
+ notes: `Components to register: ${registrableFiles.map((f) => `${f.unit.kind}:${f.unit.id}`).join(", ")}`
3181
4068
  });
3182
4069
  }
3183
4070
  console.log(`Creating task list with ${tasks.length} tasks...`);
@@ -3274,10 +4161,12 @@ Start by listing your tasks and work through them systematically!
3274
4161
  };
3275
4162
  }
3276
4163
  });
4164
+ await gitAddAndCommit(targetPath, `feat(template): apply intelligent merge for ${slug}`, void 0, {
4165
+ skipIfNoStaged: true
4166
+ });
3277
4167
  return {
3278
4168
  success: true,
3279
4169
  applied: true,
3280
- branchName,
3281
4170
  message: `Successfully resolved ${conflicts.length} conflicts from template ${slug}`,
3282
4171
  conflictsResolved: conflictResolutions
3283
4172
  };
@@ -3285,7 +4174,6 @@ Start by listing your tasks and work through them systematically!
3285
4174
  return {
3286
4175
  success: false,
3287
4176
  applied: false,
3288
- branchName: baseBranchName,
3289
4177
  message: `Failed to resolve conflicts: ${error instanceof Error ? error.message : String(error)}`,
3290
4178
  conflictsResolved: [],
3291
4179
  error: error instanceof Error ? error.message : String(error)
@@ -3295,56 +4183,13 @@ Start by listing your tasks and work through them systematically!
3295
4183
  });
3296
4184
  var validationAndFixStep = createStep({
3297
4185
  id: "validation-and-fix",
3298
- description: "Validate the merged template code and fix any validation errors using a specialized agent",
3299
- inputSchema: z.object({
3300
- commitSha: z.string(),
3301
- slug: z.string(),
3302
- targetPath: z.string().optional(),
3303
- templateDir: z.string(),
3304
- orderedUnits: z.array(
3305
- z.object({
3306
- kind: z.string(),
3307
- id: z.string(),
3308
- file: z.string()
3309
- })
3310
- ),
3311
- copiedFiles: z.array(
3312
- z.object({
3313
- source: z.string(),
3314
- destination: z.string(),
3315
- unit: z.object({
3316
- kind: z.string(),
3317
- id: z.string()
3318
- })
3319
- })
3320
- ),
3321
- conflictsResolved: z.array(
3322
- z.object({
3323
- unit: z.object({
3324
- kind: z.string(),
3325
- id: z.string()
3326
- }),
3327
- issue: z.string(),
3328
- resolution: z.string()
3329
- })
3330
- ).optional(),
3331
- maxIterations: z.number().optional().default(5)
3332
- }),
3333
- outputSchema: z.object({
3334
- success: z.boolean(),
3335
- applied: z.boolean(),
3336
- message: z.string(),
3337
- validationResults: z.object({
3338
- valid: z.boolean(),
3339
- errorsFixed: z.number(),
3340
- remainingErrors: z.number()
3341
- }),
3342
- error: z.string().optional()
3343
- }),
4186
+ description: "Validate the merged template code and fix any issues using a specialized agent",
4187
+ inputSchema: ValidationFixInputSchema,
4188
+ outputSchema: ValidationFixResultSchema,
3344
4189
  execute: async ({ inputData, runtimeContext }) => {
3345
4190
  console.log("Validation and fix step starting...");
3346
4191
  const { commitSha, slug, orderedUnits, templateDir, copiedFiles, conflictsResolved, maxIterations = 5 } = inputData;
3347
- const targetPath = inputData.targetPath || runtimeContext.get("targetPath") || process.cwd();
4192
+ const targetPath = resolveTargetPath(inputData, runtimeContext);
3348
4193
  const hasChanges = copiedFiles.length > 0 || conflictsResolved && conflictsResolved.length > 0;
3349
4194
  if (!hasChanges) {
3350
4195
  console.log("\u23ED\uFE0F Skipping validation - no files copied or conflicts resolved");
@@ -3375,26 +4220,84 @@ var validationAndFixStep = createStep({
3375
4220
  - ESLint issues
3376
4221
  - Import/export problems
3377
4222
  - Missing dependencies
4223
+ - Index file structure and exports
4224
+ - Component registration correctness
4225
+ - Naming convention compliance
3378
4226
 
3379
4227
  2. **Fix validation errors systematically**:
3380
4228
  - Use readFile to examine files with errors
3381
- - Use multiEdit to fix issues like missing imports, incorrect paths, syntax errors
4229
+ - Use multiEdit for simple search-replace fixes (single line changes)
4230
+ - Use replaceLines for complex multiline fixes (imports, function signatures, etc.)
3382
4231
  - Use listDirectory to understand project structure when fixing import paths
3383
4232
  - Update file contents to resolve TypeScript and linting issues
3384
4233
 
3385
- 3. **Re-validate after fixes** to ensure all issues are resolved
3386
-
3387
- 4. **Focus on template integration issues**:
3388
- - Files were copied with new names based on unit IDs
3389
- - Original template imports may reference old filenames
3390
- - Missing imports in index files
3391
- - Incorrect file paths in imports
3392
- - Type mismatches after integration
3393
- - Missing exports in barrel files
4234
+ 3. **Choose the right tool for the job**:
4235
+ - multiEdit: Simple replacements, single line changes, small fixes
4236
+ - replaceLines: Multiline imports, function signatures, complex code blocks
4237
+ - writeFile: ONLY for creating new files (never overwrite existing)
4238
+
4239
+ 4. **Create missing files ONLY when necessary**:
4240
+ - Use writeFile ONLY for creating NEW files that don't exist
4241
+ - NEVER overwrite existing files - use multiEdit or replaceLines instead
4242
+ - Common cases: missing barrel files (index.ts), missing config files, missing type definitions
4243
+ - Always check with readFile first to ensure file doesn't exist
4244
+
4245
+ 5. **Fix ALL template integration issues**:
4246
+ - Fix import path issues in copied files
4247
+ - Ensure TypeScript imports and exports are correct
4248
+ - Validate integration works properly
4249
+ - Fix files copied with new names based on unit IDs
4250
+ - Update original template imports that reference old filenames
4251
+ - Fix missing imports in index files
4252
+ - Fix incorrect file paths in imports
4253
+ - Fix type mismatches after integration
4254
+ - Fix missing exports in barrel files
3394
4255
  - Use the COPIED FILES mapping below to fix import paths
4256
+ - Fix any missing dependencies or module resolution issues
4257
+
4258
+ 6. **Validate index file structure**:
4259
+ - Correct imports for all components
4260
+ - Proper anchor structure (agents: {}, etc.)
4261
+ - No duplicate registrations
4262
+ - Correct export names and paths
4263
+ - Proper formatting and organization
4264
+
4265
+ 7. **Follow naming conventions**:
4266
+ Import paths:
4267
+ - camelCase: import { myAgent } from './myAgent'
4268
+ - snake_case: import { myAgent } from './my_agent'
4269
+ - kebab-case: import { myAgent } from './my-agent'
4270
+ - PascalCase: import { MyAgent } from './MyAgent'
4271
+
4272
+ File names:
4273
+ - camelCase: weatherAgent.ts, chatAgent.ts
4274
+ - snake_case: weather_agent.ts, chat_agent.ts
4275
+ - kebab-case: weather-agent.ts, chat-agent.ts
4276
+ - PascalCase: WeatherAgent.ts, ChatAgent.ts
4277
+
4278
+ Key Rule: Keep variable/export names unchanged, only adapt file names and import paths
4279
+
4280
+ 8. **Re-validate after fixes** to ensure all issues are resolved
3395
4281
 
3396
4282
  CRITICAL: Always validate the entire project first to get a complete picture of issues, then fix them systematically, and re-validate to confirm fixes worked.
3397
4283
 
4284
+ CRITICAL TOOL SELECTION GUIDE:
4285
+ - **multiEdit**: Use for simple string replacements, single-line changes
4286
+ Example: changing './oldPath' to './newPath'
4287
+
4288
+ - **replaceLines**: Use for multiline fixes, complex code structures
4289
+ Example: fixing multiline imports, function signatures, or code blocks
4290
+ Usage: replaceLines({ filePath: 'file.ts', startLine: 5, endLine: 8, newContent: 'new multiline content' })
4291
+
4292
+ - **writeFile**: ONLY for creating new files that don't exist
4293
+ Example: creating missing index.ts barrel files
4294
+
4295
+ CRITICAL WRITEFILE SAFETY RULES:
4296
+ - ONLY use writeFile for creating NEW files that don't exist
4297
+ - ALWAYS check with readFile first to verify file doesn't exist
4298
+ - NEVER use writeFile to overwrite existing files - use multiEdit or replaceLines instead
4299
+ - Common valid uses: missing index.ts barrel files, missing type definitions, missing config files
4300
+
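
The safety rules above amount to "check before create". Below is a minimal sketch of that pattern using plain Node `fs` APIs rather than the agent's own writeFile tool; the barrel-file path and export line are hypothetical examples, not taken from the package.

```typescript
import { existsSync, mkdirSync, writeFileSync } from "node:fs";
import { dirname } from "node:path";

// Hypothetical barrel file; illustrates the "never overwrite existing files" rule.
const barrelPath = "src/mastra/agents/index.ts";

if (!existsSync(barrelPath)) {
  // Safe to create: the file does not exist, so nothing can be clobbered.
  mkdirSync(dirname(barrelPath), { recursive: true });
  writeFileSync(barrelPath, "export * from './weatherAgent';\n");
} else {
  // Existing file: edit in place (multiEdit / replaceLines) instead of rewriting it.
  console.log(`${barrelPath} already exists; skipping writeFile.`);
}
```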
3398
4301
  CRITICAL IMPORT PATH RESOLUTION:
3399
4302
  The following files were copied from template with new names:
3400
4303
  ${JSON.stringify(copiedFiles, null, 2)}
@@ -3415,11 +4318,13 @@ INTEGRATED UNITS:
3415
4318
  ${JSON.stringify(orderedUnits, null, 2)}
3416
4319
 
3417
4320
  Be thorough and methodical. Always use listDirectory to verify actual file existence before fixing imports.`,
3418
- model: openai("gpt-4o-mini"),
4321
+ model: resolveModel2(runtimeContext),
3419
4322
  tools: {
3420
4323
  validateCode: allTools.validateCode,
3421
4324
  readFile: allTools.readFile,
4325
+ writeFile: allTools.writeFile,
3422
4326
  multiEdit: allTools.multiEdit,
4327
+ replaceLines: allTools.replaceLines,
3423
4328
  listDirectory: allTools.listDirectory,
3424
4329
  executeCommand: allTools.executeCommand
3425
4330
  }
@@ -3480,10 +4385,12 @@ Previous iterations may have fixed some issues, so start by re-running validateC
3480
4385
  currentIteration++;
3481
4386
  }
3482
4387
  try {
3483
- await exec(
3484
- `git add . && git commit -m "fix(template): resolve validation errors for ${slug}@${commitSha.substring(0, 7)}" || true`,
4388
+ await gitAddAndCommit(
4389
+ targetPath,
4390
+ `fix(template): resolve validation errors for ${slug}@${commitSha.substring(0, 7)}`,
4391
+ void 0,
3485
4392
  {
3486
- cwd: targetPath
4393
+ skipIfNoStaged: true
3487
4394
  }
3488
4395
  );
3489
4396
  } catch (commitError) {
@@ -3522,10 +4429,10 @@ Previous iterations may have fixed some issues, so start by re-running validateC
3522
4429
  }
3523
4430
  }
3524
4431
  });
3525
- var mergeTemplateWorkflow = createWorkflow({
3526
- id: "merge-template",
4432
+ var agentBuilderTemplateWorkflow = createWorkflow({
4433
+ id: "agent-builder-template",
3527
4434
  description: "Merges a Mastra template repository into the current project using intelligent AgentBuilder-powered merging",
3528
- inputSchema: MergeInputSchema,
4435
+ inputSchema: AgentBuilderInputSchema,
3529
4436
  outputSchema: ApplyResultSchema,
3530
4437
  steps: [
3531
4438
  cloneTemplateStep,
@@ -3533,15 +4440,36 @@ var mergeTemplateWorkflow = createWorkflow({
3533
4440
  discoverUnitsStep,
3534
4441
  orderUnitsStep,
3535
4442
  packageMergeStep,
3536
- flatInstallStep,
4443
+ installStep,
3537
4444
  programmaticFileCopyStep,
3538
4445
  intelligentMergeStep,
3539
4446
  validationAndFixStep
3540
4447
  ]
3541
- }).then(cloneTemplateStep).parallel([analyzePackageStep, discoverUnitsStep]).map(async ({ getStepResult }) => {
4448
+ }).then(cloneTemplateStep).map(async ({ getStepResult }) => {
4449
+ const cloneResult = getStepResult(cloneTemplateStep);
4450
+ if (shouldAbortWorkflow(cloneResult)) {
4451
+ throw new Error(`Critical failure in clone step: ${cloneResult.error}`);
4452
+ }
4453
+ return cloneResult;
4454
+ }).parallel([analyzePackageStep, discoverUnitsStep]).map(async ({ getStepResult }) => {
4455
+ const analyzeResult = getStepResult(analyzePackageStep);
3542
4456
  const discoverResult = getStepResult(discoverUnitsStep);
4457
+ if (shouldAbortWorkflow(analyzeResult)) {
4458
+ throw new Error(`Failure in analyze package step: ${analyzeResult.error || "Package analysis failed"}`);
4459
+ }
4460
+ if (shouldAbortWorkflow(discoverResult)) {
4461
+ throw new Error(`Failure in discover units step: ${discoverResult.error || "Unit discovery failed"}`);
4462
+ }
3543
4463
  return discoverResult;
3544
4464
  }).then(orderUnitsStep).map(async ({ getStepResult, getInitData }) => {
4465
+ const cloneResult = getStepResult(cloneTemplateStep);
4466
+ const initData = getInitData();
4467
+ return {
4468
+ commitSha: cloneResult.commitSha,
4469
+ slug: cloneResult.slug,
4470
+ targetPath: initData.targetPath
4471
+ };
4472
+ }).then(prepareBranchStep).map(async ({ getStepResult, getInitData }) => {
3545
4473
  const cloneResult = getStepResult(cloneTemplateStep);
3546
4474
  const packageResult = getStepResult(analyzePackageStep);
3547
4475
  const initData = getInitData();
@@ -3556,10 +4484,14 @@ var mergeTemplateWorkflow = createWorkflow({
3556
4484
  return {
3557
4485
  targetPath: initData.targetPath
3558
4486
  };
3559
- }).then(flatInstallStep).map(async ({ getStepResult, getInitData }) => {
4487
+ }).then(installStep).map(async ({ getStepResult, getInitData }) => {
3560
4488
  const cloneResult = getStepResult(cloneTemplateStep);
3561
4489
  const orderResult = getStepResult(orderUnitsStep);
4490
+ const installResult = getStepResult(installStep);
3562
4491
  const initData = getInitData();
4492
+ if (shouldAbortWorkflow(installResult)) {
4493
+ throw new Error(`Failure in install step: ${installResult.error || "Install failed"}`);
4494
+ }
3563
4495
  return {
3564
4496
  orderedUnits: orderResult.orderedUnits,
3565
4497
  templateDir: cloneResult.templateDir,
@@ -3594,15 +4526,31 @@ var mergeTemplateWorkflow = createWorkflow({
3594
4526
  copiedFiles: copyResult.copiedFiles,
3595
4527
  conflictsResolved: mergeResult.conflictsResolved
3596
4528
  };
3597
- }).then(validationAndFixStep).map(async ({ getStepResult, getInitData }) => {
3598
- const validationResult = getStepResult(validationAndFixStep);
3599
- const intelligentMergeResult = getStepResult(intelligentMergeStep);
3600
- const copyResult = getStepResult(programmaticFileCopyStep);
4529
+ }).then(validationAndFixStep).map(async ({ getStepResult }) => {
3601
4530
  const cloneResult = getStepResult(cloneTemplateStep);
3602
- const initData = getInitData();
3603
- const branchName = intelligentMergeResult.branchName || `feat/install-template-${cloneResult.slug || initData.slug}`;
3604
- const allErrors = [copyResult.error, intelligentMergeResult.error, validationResult.error].filter(Boolean);
3605
- const overallSuccess = copyResult.success !== false && intelligentMergeResult.success !== false && validationResult.success;
4531
+ const analyzeResult = getStepResult(analyzePackageStep);
4532
+ const discoverResult = getStepResult(discoverUnitsStep);
4533
+ const orderResult = getStepResult(orderUnitsStep);
4534
+ const prepareBranchResult = getStepResult(prepareBranchStep);
4535
+ const packageMergeResult = getStepResult(packageMergeStep);
4536
+ const installResult = getStepResult(installStep);
4537
+ const copyResult = getStepResult(programmaticFileCopyStep);
4538
+ const intelligentMergeResult = getStepResult(intelligentMergeStep);
4539
+ const validationResult = getStepResult(validationAndFixStep);
4540
+ const branchName = prepareBranchResult.branchName;
4541
+ const allErrors = [
4542
+ cloneResult.error,
4543
+ analyzeResult.error,
4544
+ discoverResult.error,
4545
+ orderResult.error,
4546
+ prepareBranchResult.error,
4547
+ packageMergeResult.error,
4548
+ installResult.error,
4549
+ copyResult.error,
4550
+ intelligentMergeResult.error,
4551
+ validationResult.error
4552
+ ].filter(Boolean);
4553
+ const overallSuccess = cloneResult.success !== false && analyzeResult.success !== false && discoverResult.success !== false && orderResult.success !== false && prepareBranchResult.success !== false && packageMergeResult.success !== false && installResult.success !== false && copyResult.success !== false && intelligentMergeResult.success !== false && validationResult.success !== false;
3606
4554
  const messages = [];
3607
4555
  if (copyResult.copiedFiles?.length > 0) {
3608
4556
  messages.push(`${copyResult.copiedFiles.length} files copied`);
@@ -3627,6 +4575,13 @@ var mergeTemplateWorkflow = createWorkflow({
3627
4575
  branchName,
3628
4576
  // Additional debugging info
3629
4577
  stepResults: {
4578
+ cloneSuccess: cloneResult.success,
4579
+ analyzeSuccess: analyzeResult.success,
4580
+ discoverSuccess: discoverResult.success,
4581
+ orderSuccess: orderResult.success,
4582
+ prepareBranchSuccess: prepareBranchResult.success,
4583
+ packageMergeSuccess: packageMergeResult.success,
4584
+ installSuccess: installResult.success,
3630
4585
  copySuccess: copyResult.success,
3631
4586
  mergeSuccess: intelligentMergeResult.success,
3632
4587
  validationSuccess: validationResult.success,
@@ -3636,15 +4591,1412 @@ var mergeTemplateWorkflow = createWorkflow({
3636
4591
  }
3637
4592
  };
3638
4593
  }).commit();
4594
+ async function mergeTemplateBySlug(slug, targetPath) {
4595
+ const template = await getMastraTemplate(slug);
4596
+ const run = await agentBuilderTemplateWorkflow.createRunAsync();
4597
+ return await run.start({
4598
+ inputData: {
4599
+ repo: template.githubUrl,
4600
+ slug: template.slug,
4601
+ targetPath
4602
+ }
4603
+ });
4604
+ }
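
A hedged usage sketch for the helper above. The slug and target path are invented for illustration, and the status checks follow the workflow result statuses ("success" / "suspended" / "failed") described in the research summary later in this file.

```typescript
// Hypothetical slug; mergeTemplateBySlug resolves it via getMastraTemplate first.
async function main() {
  const result = await mergeTemplateBySlug("weather-agent", process.cwd());

  if (result.status === "success") {
    console.log("Template merged successfully.");
  } else if (result.status === "suspended") {
    console.log("Workflow paused awaiting input at:", result.suspended);
  } else {
    console.error("Template merge failed.");
  }
}

main().catch(console.error);
```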
3639
4605
  var determineConflictStrategy = (_unit, _targetFile) => {
3640
4606
  return "skip";
3641
4607
  };
4608
+ var shouldAbortWorkflow = (stepResult) => {
4609
+ return stepResult?.success === false || stepResult?.error;
4610
+ };
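
For reference, the guard above treats either an explicit `success: false` or any non-empty `error` as fatal. Note that it returns the error value itself rather than a strict boolean, which is still truthy in the `if` checks used earlier in this file:

```typescript
shouldAbortWorkflow({ success: false });               // true
shouldAbortWorkflow({ success: true, error: "boom" }); // "boom" (truthy)
shouldAbortWorkflow({ success: true });                // undefined (falsy)
shouldAbortWorkflow(undefined);                        // undefined (falsy)
```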
4611
+ var TaskSchema = z.array(
4612
+ z.object({
4613
+ id: z.string().describe("Unique task ID using kebab-case"),
4614
+ content: z.string().describe("Specific, actionable task description"),
4615
+ status: z.enum(["pending", "in_progress", "completed", "blocked"]).default("pending"),
4616
+ priority: z.enum(["high", "medium", "low"]).describe("Task priority"),
4617
+ dependencies: z.array(z.string()).optional().describe("IDs of tasks this depends on"),
4618
+ notes: z.string().describe("Detailed implementation notes and specifics")
4619
+ })
4620
+ );
4621
+ var QuestionSchema = z.array(
4622
+ z.object({
4623
+ id: z.string().describe("Unique question ID"),
4624
+ question: z.string().describe("Clear, specific question for the user"),
4625
+ type: z.enum(["choice", "text", "boolean"]).describe("Type of answer expected"),
4626
+ options: z.array(z.string()).optional().describe("Options for choice questions"),
4627
+ context: z.string().optional().describe("Additional context or explanation")
4628
+ })
4629
+ );
4630
+ var PlanningIterationResultSchema = z.object({
4631
+ success: z.boolean(),
4632
+ tasks: TaskSchema,
4633
+ questions: QuestionSchema,
4634
+ reasoning: z.string(),
4635
+ planComplete: z.boolean(),
4636
+ message: z.string(),
4637
+ error: z.string().optional(),
4638
+ allPreviousQuestions: z.array(z.any()).optional(),
4639
+ allPreviousAnswers: z.record(z.string()).optional()
4640
+ });
4641
+
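
To make the task shape concrete, here is a hedged example that would satisfy TaskSchema as defined above; the schema is restated locally so the snippet stands alone, and the task ids and contents are invented.

```typescript
import { z } from "zod";

// Restated to keep the snippet self-contained; mirrors TaskSchema above.
const TaskSchema = z.array(
  z.object({
    id: z.string(),
    content: z.string(),
    status: z.enum(["pending", "in_progress", "completed", "blocked"]).default("pending"),
    priority: z.enum(["high", "medium", "low"]),
    dependencies: z.array(z.string()).optional(),
    notes: z.string(),
  }),
);

// Invented example data; parse() throws if the shape does not match.
const tasks = TaskSchema.parse([
  {
    id: "create-workflow-folder",
    content: "Create src/mastra/workflows/send-email/ with types.ts, steps.ts and workflow.ts",
    priority: "high",
    notes: "Follow the mandatory kebab-case folder structure.",
  },
  {
    id: "register-workflow",
    content: "Register the workflow in src/mastra/index.ts",
    priority: "medium",
    dependencies: ["create-workflow-folder"],
    notes: "Use a camelCase key in the workflows map.",
  },
]);

console.log(tasks[0].status); // "pending" (filled in by the schema default)
```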
4642
+ // src/workflows/task-planning/prompts.ts
4643
+ var taskPlanningPrompts = {
4644
+ planningAgent: {
4645
+ instructions: (context) => `You are a Mastra workflow planning expert. Your task is to create a detailed, executable task plan.
4646
+
4647
+ PLANNING RESPONSIBILITIES:
4648
+ 1. **Analyze Requirements**: Review the user's description and requirements thoroughly
4649
+ 2. **Identify Decision Points**: Find any choices that require user input (email providers, databases, APIs, etc.)
4650
+ 3. **Create Specific Tasks**: Generate concrete, actionable tasks with clear implementation notes
4651
+ 4. **Ask Clarifying Questions**: If any decisions are unclear, formulate specific questions for the user
4652
+ - Do not ask about package managers
4653
+ - Assume the user is going to use zod for validation
4654
+ - You do not need to ask questions if you have none
4655
+ - NEVER ask questions that have already been answered before
4656
+ 5. **Incorporate Feedback**: Use any previous answers or feedback to refine the plan
4657
+
4658
+ ${context.storedQAPairs.length > 0 ? `PREVIOUS QUESTION-ANSWER PAIRS (${context.storedQAPairs.length} total):
4659
+ ${context.storedQAPairs.map(
4660
+ (pair, index) => `${index + 1}. Q: ${pair.question.question}
4661
+ A: ${pair.answer || "NOT ANSWERED YET"}
4662
+ Type: ${pair.question.type}
4663
+ Asked: ${pair.askedAt}
4664
+ ${pair.answer ? `Answered: ${pair.answeredAt}` : ""}`
4665
+ ).join("\n\n")}
4666
+
4667
+ IMPORTANT: DO NOT ASK ANY QUESTIONS THAT HAVE ALREADY BEEN ASKED!` : ""}
4668
+
4669
+ Based on the context and any user answers, create or refine the task plan.`,
4670
+ refinementPrompt: (context) => `Refine the existing task plan based on all user answers collected so far.
4671
+
4672
+ ANSWERED QUESTIONS AND RESPONSES:
4673
+ ${context.storedQAPairs.filter((pair) => pair.answer).map(
4674
+ (pair, index) => `${index + 1}. Q: ${pair.question.question}
4675
+ A: ${pair.answer}
4676
+ Context: ${pair.question.context || "None"}`
4677
+ ).join("\n\n")}
4678
+
4679
+ REQUIREMENTS:
4680
+ - Action: ${context.action}
4681
+ - Workflow Name: ${context.workflowName || "To be determined"}
4682
+ - Description: ${context.description || "Not specified"}
4683
+ - Requirements: ${context.requirements || "Not specified"}
4684
+
4685
+ PROJECT CONTEXT:
4686
+ - Discovered Workflows: ${JSON.stringify(context.discoveredWorkflows, null, 2)}
4687
+ - Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
4688
+ - Research: ${JSON.stringify(context.research, null, 2)}
4689
+
4690
+ ${context.hasTaskFeedback ? `
4691
+ USER FEEDBACK ON PREVIOUS TASK LIST:
4692
+ ${context.userAnswers?.taskFeedback}
4693
+
4694
+ PLEASE INCORPORATE THIS FEEDBACK INTO THE REFINED TASK LIST.` : ""}
4695
+
4696
+ Refine the task list and determine if any additional questions are needed.`,
4697
+ initialPrompt: (context) => `Create an initial task plan for ${context.action === "create" ? "creating" : "editing"} a Mastra workflow.
4698
+
4699
+ REQUIREMENTS:
4700
+ - Action: ${context.action}
4701
+ - Workflow Name: ${context.workflowName || "To be determined"}
4702
+ - Description: ${context.description || "Not specified"}
4703
+ - Requirements: ${context.requirements || "Not specified"}
4704
+
4705
+ PROJECT CONTEXT:
4706
+ - Discovered Workflows: ${JSON.stringify(context.discoveredWorkflows, null, 2)}
4707
+ - Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
4708
+ - Research: ${JSON.stringify(context.research, null, 2)}
4709
+
4710
+ Create specific tasks and identify any questions that need user clarification.`
4711
+ },
4712
+ taskApproval: {
4713
+ message: (questionsCount) => `Please answer ${questionsCount} question(s) to finalize the workflow plan:`,
4714
+ approvalMessage: (tasksCount) => `Please review and approve the ${tasksCount} task(s) for execution:`
4715
+ }
4716
+ };
4717
+ var WorkflowBuilderInputSchema = z.object({
4718
+ workflowName: z.string().optional().describe("Name of the workflow to create or edit"),
4719
+ action: z.enum(["create", "edit"]).describe("Action to perform: create new or edit existing workflow"),
4720
+ description: z.string().optional().describe("Description of what the workflow should do"),
4721
+ requirements: z.string().optional().describe("Detailed requirements for the workflow"),
4722
+ projectPath: z.string().optional().describe("Path to the Mastra project (defaults to current directory)")
4723
+ });
4724
+ var DiscoveredWorkflowSchema = z.object({
4725
+ name: z.string(),
4726
+ file: z.string(),
4727
+ description: z.string().optional(),
4728
+ inputSchema: z.any().optional(),
4729
+ outputSchema: z.any().optional(),
4730
+ steps: z.array(z.string()).optional()
4731
+ });
4732
+ var WorkflowDiscoveryResultSchema = z.object({
4733
+ success: z.boolean(),
4734
+ workflows: z.array(DiscoveredWorkflowSchema),
4735
+ mastraIndexExists: z.boolean(),
4736
+ message: z.string(),
4737
+ error: z.string().optional()
4738
+ });
4739
+ var ProjectDiscoveryResultSchema = z.object({
4740
+ success: z.boolean(),
4741
+ structure: z.object({
4742
+ hasWorkflowsDir: z.boolean(),
4743
+ hasAgentsDir: z.boolean(),
4744
+ hasToolsDir: z.boolean(),
4745
+ hasMastraIndex: z.boolean(),
4746
+ existingWorkflows: z.array(z.string()),
4747
+ existingAgents: z.array(z.string()),
4748
+ existingTools: z.array(z.string())
4749
+ }),
4750
+ dependencies: z.record(z.string()),
4751
+ message: z.string(),
4752
+ error: z.string().optional()
4753
+ });
4754
+ var WorkflowResearchResultSchema = z.object({
4755
+ success: z.boolean(),
4756
+ documentation: z.object({
4757
+ workflowPatterns: z.array(z.string()),
4758
+ stepExamples: z.array(z.string()),
4759
+ bestPractices: z.array(z.string())
4760
+ }),
4761
+ webResources: z.array(
4762
+ z.object({
4763
+ title: z.string(),
4764
+ url: z.string(),
4765
+ snippet: z.string(),
4766
+ relevance: z.number()
4767
+ })
4768
+ ),
4769
+ message: z.string(),
4770
+ error: z.string().optional()
4771
+ });
4772
+ var TaskManagementResultSchema = z.object({
4773
+ success: z.boolean(),
4774
+ tasks: TaskSchema,
4775
+ message: z.string(),
4776
+ error: z.string().optional()
4777
+ });
4778
+ var TaskExecutionInputSchema = z.object({
4779
+ action: z.enum(["create", "edit"]),
4780
+ workflowName: z.string().optional(),
4781
+ description: z.string().optional(),
4782
+ requirements: z.string().optional(),
4783
+ tasks: TaskSchema,
4784
+ discoveredWorkflows: z.array(z.any()),
4785
+ projectStructure: z.any(),
4786
+ research: z.any(),
4787
+ projectPath: z.string().optional()
4788
+ });
4789
+ var TaskExecutionSuspendSchema = z.object({
4790
+ questions: QuestionSchema,
4791
+ currentProgress: z.string(),
4792
+ completedTasks: z.array(z.string()),
4793
+ message: z.string()
4794
+ });
4795
+ var TaskExecutionResumeSchema = z.object({
4796
+ answers: z.array(
4797
+ z.object({
4798
+ questionId: z.string(),
4799
+ answer: z.string()
4800
+ })
4801
+ )
4802
+ });
4803
+ var TaskExecutionResultSchema = z.object({
4804
+ success: z.boolean(),
4805
+ filesModified: z.array(z.string()),
4806
+ validationResults: z.object({
4807
+ passed: z.boolean(),
4808
+ errors: z.array(z.string()),
4809
+ warnings: z.array(z.string())
4810
+ }),
4811
+ completedTasks: z.array(z.string()),
4812
+ message: z.string(),
4813
+ error: z.string().optional()
4814
+ });
4815
+ z.object({
4816
+ questions: QuestionSchema
4817
+ });
4818
+ z.object({
4819
+ answers: z.record(z.string()),
4820
+ hasAnswers: z.boolean()
4821
+ });
4822
+ var WorkflowBuilderResultSchema = z.object({
4823
+ success: z.boolean(),
4824
+ action: z.enum(["create", "edit"]),
4825
+ workflowName: z.string().optional(),
4826
+ workflowFile: z.string().optional(),
4827
+ discovery: WorkflowDiscoveryResultSchema.optional(),
4828
+ projectStructure: ProjectDiscoveryResultSchema.optional(),
4829
+ research: WorkflowResearchResultSchema.optional(),
4830
+ planning: PlanningIterationResultSchema.optional(),
4831
+ taskManagement: TaskManagementResultSchema.optional(),
4832
+ execution: TaskExecutionResultSchema.optional(),
4833
+ needsUserInput: z.boolean().optional(),
4834
+ questions: QuestionSchema.optional(),
4835
+ message: z.string(),
4836
+ nextSteps: z.array(z.string()).optional(),
4837
+ error: z.string().optional()
4838
+ });
4839
+ var TaskExecutionIterationInputSchema = (taskLength) => z.object({
4840
+ status: z.enum(["in_progress", "completed", "needs_clarification"]).describe('Status - only use "completed" when ALL remaining tasks are finished'),
4841
+ progress: z.string().describe("Current progress description"),
4842
+ completedTasks: z.array(z.string()).describe("List of ALL completed task IDs (including previously completed ones)"),
4843
+ totalTasksRequired: z.number().describe(`Total number of tasks that must be completed (should be ${taskLength})`),
4844
+ tasksRemaining: z.array(z.string()).describe("List of task IDs that still need to be completed"),
4845
+ filesModified: z.array(z.string()).describe("List of files that were created or modified - use these exact paths for validateCode tool"),
4846
+ questions: QuestionSchema.optional().describe("Questions for user if clarification is needed"),
4847
+ message: z.string().describe("Summary of work completed or current status"),
4848
+ error: z.string().optional().describe("Any errors encountered")
4849
+ });
4850
+
4851
+ // src/workflows/task-planning/schema.ts
4852
+ var PlanningIterationInputSchema = z.object({
4853
+ action: z.enum(["create", "edit"]),
4854
+ workflowName: z.string().optional(),
4855
+ description: z.string().optional(),
4856
+ requirements: z.string().optional(),
4857
+ discoveredWorkflows: z.array(DiscoveredWorkflowSchema),
4858
+ projectStructure: ProjectDiscoveryResultSchema,
4859
+ research: WorkflowResearchResultSchema,
4860
+ userAnswers: z.record(z.string()).optional()
4861
+ });
4862
+ var PlanningIterationSuspendSchema = z.object({
4863
+ questions: QuestionSchema,
4864
+ message: z.string(),
4865
+ currentPlan: z.object({
4866
+ tasks: TaskSchema,
4867
+ reasoning: z.string()
4868
+ })
4869
+ });
4870
+ var PlanningIterationResumeSchema = z.object({
4871
+ answers: z.record(z.string())
4872
+ });
4873
+ var PlanningAgentOutputSchema = z.object({
4874
+ tasks: TaskSchema,
4875
+ questions: QuestionSchema.optional(),
4876
+ reasoning: z.string().describe("Explanation of the plan and any questions"),
4877
+ planComplete: z.boolean().describe("Whether the plan is ready for execution (no more questions)")
4878
+ });
4879
+ var TaskApprovalOutputSchema = z.object({
4880
+ approved: z.boolean(),
4881
+ tasks: TaskSchema,
4882
+ message: z.string(),
4883
+ userFeedback: z.string().optional()
4884
+ });
4885
+ var TaskApprovalSuspendSchema = z.object({
4886
+ taskList: TaskSchema,
4887
+ summary: z.string(),
4888
+ message: z.string()
4889
+ });
4890
+ var TaskApprovalResumeSchema = z.object({
4891
+ approved: z.boolean(),
4892
+ modifications: z.string().optional()
4893
+ });
4894
+
4895
+ // src/workflows/task-planning/task-planning.ts
4896
+ var planningIterationStep = createStep$1({
4897
+ id: "planning-iteration",
4898
+ description: "Create or refine task plan with user input",
4899
+ inputSchema: PlanningIterationInputSchema,
4900
+ outputSchema: PlanningIterationResultSchema,
4901
+ suspendSchema: PlanningIterationSuspendSchema,
4902
+ resumeSchema: PlanningIterationResumeSchema,
4903
+ execute: async ({ inputData, resumeData, suspend, runtimeContext }) => {
4904
+ const {
4905
+ action,
4906
+ workflowName,
4907
+ description,
4908
+ requirements,
4909
+ discoveredWorkflows,
4910
+ projectStructure,
4911
+ research,
4912
+ userAnswers
4913
+ } = inputData;
4914
+ console.log("Starting planning iteration...");
4915
+ const qaKey = "workflow-builder-qa";
4916
+ let storedQAPairs = runtimeContext.get(qaKey) || [];
4917
+ const newAnswers = { ...userAnswers || {}, ...resumeData?.answers || {} };
4918
+ console.log("before", storedQAPairs);
4919
+ console.log("newAnswers", newAnswers);
4920
+ if (Object.keys(newAnswers).length > 0) {
4921
+ storedQAPairs = storedQAPairs.map((pair) => {
4922
+ if (newAnswers[pair.question.id]) {
4923
+ return {
4924
+ ...pair,
4925
+ answer: newAnswers[pair.question.id] || null,
4926
+ answeredAt: (/* @__PURE__ */ new Date()).toISOString()
4927
+ };
4928
+ }
4929
+ return pair;
4930
+ });
4931
+ runtimeContext.set(qaKey, storedQAPairs);
4932
+ }
4933
+ console.log("after", storedQAPairs);
4934
+ console.log(
4935
+ `Current Q&A state: ${storedQAPairs.length} question-answer pairs, ${storedQAPairs.filter((p) => p.answer).length} answered`
4936
+ );
4937
+ try {
4938
+ const planningAgent = new Agent$1({
4939
+ model: resolveModel(runtimeContext),
4940
+ instructions: taskPlanningPrompts.planningAgent.instructions({
4941
+ storedQAPairs
4942
+ }),
4943
+ name: "Workflow Planning Agent"
4944
+ // tools: filteredMcpTools,
4945
+ });
4946
+ const hasTaskFeedback = Boolean(userAnswers && userAnswers.taskFeedback);
4947
+ const planningPrompt = storedQAPairs.some((pair) => pair.answer) ? taskPlanningPrompts.planningAgent.refinementPrompt({
4948
+ action,
4949
+ workflowName,
4950
+ description,
4951
+ requirements,
4952
+ discoveredWorkflows,
4953
+ projectStructure,
4954
+ research,
4955
+ storedQAPairs,
4956
+ hasTaskFeedback,
4957
+ userAnswers
4958
+ }) : taskPlanningPrompts.planningAgent.initialPrompt({
4959
+ action,
4960
+ workflowName,
4961
+ description,
4962
+ requirements,
4963
+ discoveredWorkflows,
4964
+ projectStructure,
4965
+ research
4966
+ });
4967
+ const result = await planningAgent.generateVNext(planningPrompt, {
4968
+ output: PlanningAgentOutputSchema
4969
+ // maxSteps: 15,
4970
+ });
4971
+ const planResult = await result.object;
4972
+ if (!planResult) {
4973
+ return {
4974
+ tasks: [],
4975
+ success: false,
4976
+ questions: [],
4977
+ reasoning: "Planning agent failed to generate a valid response",
4978
+ planComplete: false,
4979
+ message: "Planning failed"
4980
+ };
4981
+ }
4982
+ if (planResult.questions && planResult.questions.length > 0 && !planResult.planComplete) {
4983
+ console.log(`Planning needs user clarification: ${planResult.questions.length} questions`);
4984
+ console.log(planResult.questions);
4985
+ const newQAPairs = planResult.questions.map((question) => ({
4986
+ question,
4987
+ answer: null,
4988
+ askedAt: (/* @__PURE__ */ new Date()).toISOString(),
4989
+ answeredAt: null
4990
+ }));
4991
+ storedQAPairs = [...storedQAPairs, ...newQAPairs];
4992
+ runtimeContext.set(qaKey, storedQAPairs);
4993
+ console.log(
4994
+ `Updated Q&A state: ${storedQAPairs.length} total question-answer pairs, ${storedQAPairs.filter((p) => p.answer).length} answered`
4995
+ );
4996
+ return suspend({
4997
+ questions: planResult.questions,
4998
+ message: taskPlanningPrompts.taskApproval.message(planResult.questions.length),
4999
+ currentPlan: {
5000
+ tasks: planResult.tasks,
5001
+ reasoning: planResult.reasoning
5002
+ }
5003
+ });
5004
+ }
5005
+ console.log(`Planning complete with ${planResult.tasks.length} tasks`);
5006
+ runtimeContext.set(qaKey, storedQAPairs);
5007
+ console.log(
5008
+ `Final Q&A state: ${storedQAPairs.length} total question-answer pairs, ${storedQAPairs.filter((p) => p.answer).length} answered`
5009
+ );
5010
+ return {
5011
+ tasks: planResult.tasks,
5012
+ success: true,
5013
+ questions: [],
5014
+ reasoning: planResult.reasoning,
5015
+ planComplete: true,
5016
+ message: `Successfully created ${planResult.tasks.length} tasks`,
5017
+ allPreviousQuestions: storedQAPairs.map((pair) => pair.question),
5018
+ allPreviousAnswers: Object.fromEntries(
5019
+ storedQAPairs.filter((pair) => pair.answer).map((pair) => [pair.question.id, pair.answer])
5020
+ )
5021
+ };
5022
+ } catch (error) {
5023
+ console.error("Planning iteration failed:", error);
5024
+ return {
5025
+ tasks: [],
5026
+ success: false,
5027
+ questions: [],
5028
+ reasoning: `Planning failed: ${error instanceof Error ? error.message : String(error)}`,
5029
+ planComplete: false,
5030
+ message: "Planning iteration failed",
5031
+ error: error instanceof Error ? error.message : String(error),
5032
+ allPreviousQuestions: storedQAPairs.map((pair) => pair.question),
5033
+ allPreviousAnswers: Object.fromEntries(
5034
+ storedQAPairs.filter((pair) => pair.answer).map((pair) => [pair.question.id, pair.answer])
5035
+ )
5036
+ };
5037
+ }
5038
+ }
5039
+ });
5040
+ var taskApprovalStep = createStep$1({
5041
+ id: "task-approval",
5042
+ description: "Get user approval for the final task list",
5043
+ inputSchema: PlanningIterationResultSchema,
5044
+ outputSchema: TaskApprovalOutputSchema,
5045
+ suspendSchema: TaskApprovalSuspendSchema,
5046
+ resumeSchema: TaskApprovalResumeSchema,
5047
+ execute: async ({ inputData, resumeData, suspend }) => {
5048
+ const { tasks } = inputData;
5049
+ if (!resumeData?.approved && resumeData?.approved !== false) {
5050
+ console.log(`Requesting user approval for ${tasks.length} tasks`);
5051
+ const summary = `Task List for Approval:
5052
+
5053
+ ${tasks.length} tasks planned:
5054
+ ${tasks.map((task, i) => `${i + 1}. [${task.priority.toUpperCase()}] ${task.content}${task.dependencies?.length ? ` (depends on: ${task.dependencies.join(", ")})` : ""}
5055
+ Notes: ${task.notes || "None"}`).join("\n")}`;
5056
+ return suspend({
5057
+ taskList: tasks,
5058
+ summary,
5059
+ message: taskPlanningPrompts.taskApproval.approvalMessage(tasks.length)
5060
+ });
5061
+ }
5062
+ if (resumeData.approved) {
5063
+ console.log("Task list approved by user");
5064
+ return {
5065
+ approved: true,
5066
+ tasks,
5067
+ message: "Task list approved, ready for execution"
5068
+ };
5069
+ } else {
5070
+ console.log("Task list rejected by user");
5071
+ return {
5072
+ approved: false,
5073
+ tasks,
5074
+ message: "Task list rejected",
5075
+ userFeedback: resumeData.modifications
5076
+ };
5077
+ }
5078
+ }
5079
+ });
5080
+ var planningAndApprovalWorkflow = createWorkflow$1({
5081
+ id: "planning-and-approval",
5082
+ description: "Handle iterative planning with questions and task list approval",
5083
+ inputSchema: PlanningIterationInputSchema,
5084
+ outputSchema: TaskApprovalOutputSchema,
5085
+ steps: [planningIterationStep, taskApprovalStep]
5086
+ }).dountil(planningIterationStep, async ({ inputData }) => {
5087
+ console.log(`Sub-workflow planning check: planComplete=${inputData.planComplete}`);
5088
+ return inputData.planComplete === true;
5089
+ }).map(async ({ inputData }) => {
5090
+ return {
5091
+ tasks: inputData.tasks || [],
5092
+ success: inputData.success || false,
5093
+ questions: inputData.questions || [],
5094
+ reasoning: inputData.reasoning || "",
5095
+ planComplete: inputData.planComplete || false,
5096
+ message: inputData.message || ""
5097
+ };
5098
+ }).then(taskApprovalStep).commit();
5099
+
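
A simplified, hedged sketch of driving this sub-workflow end to end: start it, answer the planning questions when it suspends, then approve the task list. The run API calls mirror the patterns documented in the research summary below; the input objects are minimal placeholders that satisfy the schemas above, and the question id and answer are invented.

```typescript
// Minimal placeholder inputs that satisfy ProjectDiscoveryResultSchema / WorkflowResearchResultSchema.
const projectStructure = {
  success: true,
  structure: {
    hasWorkflowsDir: true,
    hasAgentsDir: true,
    hasToolsDir: false,
    hasMastraIndex: true,
    existingWorkflows: [],
    existingAgents: [],
    existingTools: [],
  },
  dependencies: {},
  message: "ok",
};
const research = {
  success: true,
  documentation: { workflowPatterns: [], stepExamples: [], bestPractices: [] },
  webResources: [],
  message: "ok",
};

async function planAndApprove() {
  const run = await planningAndApprovalWorkflow.createRunAsync();
  let result = await run.start({
    inputData: {
      action: "create",
      workflowName: "send-email",
      description: "Send a templated email",
      discoveredWorkflows: [],
      projectStructure,
      research,
    },
  });

  if (result.status === "suspended") {
    // Planning step suspended with questions: answer them (id and answer are hypothetical).
    result = await run.resume({
      step: result.suspended[0],
      resumeData: { answers: { "email-provider": "Resend" } },
    });
  }

  if (result.status === "suspended") {
    // Task-approval step suspended: approve the proposed task list.
    result = await run.resume({
      step: result.suspended[0],
      resumeData: { approved: true },
    });
  }

  return result;
}

planAndApprove().then((r) => console.log(r.status)).catch(console.error);
```

In practice the planning step runs inside a dountil loop and may suspend more than once with follow-up questions, so a real driver would loop on the suspended status rather than resume a fixed number of times.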
5100
+ // src/workflows/workflow-builder/prompts.ts
5101
+ var workflowResearch = `
5102
+ ## \u{1F50D} **COMPREHENSIVE MASTRA WORKFLOW RESEARCH SUMMARY**
5103
+
5104
+ Based on extensive research of Mastra documentation and examples, here's essential information for building effective Mastra workflows:
5105
+
5106
+ ### **\u{1F4CB} WORKFLOW FUNDAMENTALS**
5107
+
5108
+ **Core Components:**
5109
+ - **\`createWorkflow()\`**: Main factory function that creates workflow instances
5110
+ - **\`createStep()\`**: Creates individual workflow steps with typed inputs/outputs
5111
+ - **\`.commit()\`**: Finalizes workflow definition (REQUIRED to make workflows executable)
5112
+ - **Zod schemas**: Used for strict input/output typing and validation
5113
+
5114
+ **Basic Structure:**
5115
+ \`\`\`typescript
5116
+ import { createWorkflow, createStep } from "@mastra/core/workflows";
5117
+ import { z } from "zod";
5118
+
5119
+ const workflow = createWorkflow({
5120
+ id: "unique-workflow-id", // Required: kebab-case recommended
5121
+ description: "What this workflow does", // Optional but recommended
5122
+ inputSchema: z.object({...}), // Required: Defines workflow inputs
5123
+ outputSchema: z.object({...}) // Required: Defines final outputs
5124
+ })
5125
+ .then(step1) // Chain steps sequentially
5126
+ .then(step2)
5127
+ .commit(); // CRITICAL: Makes workflow executable
5128
+ \`\`\`
5129
+
5130
+ ### **\u{1F527} STEP CREATION PATTERNS**
5131
+
5132
+ **Standard Step Definition:**
5133
+ \`\`\`typescript
5134
+ const myStep = createStep({
5135
+ id: "step-id", // Required: unique identifier
5136
+ description: "Step description", // Recommended for clarity
5137
+ inputSchema: z.object({...}), // Required: input validation
5138
+ outputSchema: z.object({...}), // Required: output validation
5139
+ execute: async ({ inputData, mastra, getStepResult, getInitData }) => {
5140
+ // Step logic here
5141
+ return { /* matches outputSchema */ };
5142
+ }
5143
+ });
5144
+ \`\`\`
5145
+
5146
+ **Execute Function Parameters:**
5147
+ - \`inputData\`: Validated input matching inputSchema
5148
+ - \`mastra\`: Access to Mastra instance (agents, tools, other workflows)
5149
+ - \`getStepResult(stepInstance)\`: Get results from previous steps
5150
+ - \`getInitData()\`: Access original workflow input data
5151
+ - \`runtimeContext\`: Runtime dependency injection context
5152
+ - \`runCount\`: Number of times this step has run (useful for retries)
5153
+
5154
+ ### **\u{1F504} CONTROL FLOW METHODS**
5155
+
5156
+ **Sequential Execution:**
5157
+ - \`.then(step)\`: Execute steps one after another
5158
+ - Data flows automatically if schemas match
5159
+
5160
+ **Parallel Execution:**
5161
+ - \`.parallel([step1, step2])\`: Run steps simultaneously
5162
+ - All parallel steps complete before continuing
5163
+
5164
+ **Conditional Logic:**
5165
+ - \`.branch([[condition, step], [condition, step]])\`: Execute different steps based on conditions
5166
+ - Conditions evaluated sequentially, matching steps run in parallel
5167
+
5168
+ **Loops:**
5169
+ - \`.dountil(step, condition)\`: Repeat until condition becomes true
5170
+ - \`.dowhile(step, condition)\`: Repeat while condition is true
5171
+ - \`.foreach(step, {concurrency: N})\`: Execute step for each array item
5172
+
5173
+ **Data Transformation:**
5174
+ - \`.map(({ inputData, getStepResult, getInitData }) => transformedData)\`: Transform data between steps
5175
+
5176
+ ### **\u23F8\uFE0F SUSPEND & RESUME CAPABILITIES**
5177
+
5178
+ **For Human-in-the-Loop Workflows:**
5179
+ \`\`\`typescript
5180
+ const userInputStep = createStep({
5181
+ id: "user-input",
5182
+ suspendSchema: z.object({}), // Schema for suspension payload
5183
+ resumeSchema: z.object({ // Schema for resume data
5184
+ userResponse: z.string()
5185
+ }),
5186
+ execute: async ({ resumeData, suspend }) => {
5187
+ if (!resumeData?.userResponse) {
5188
+ await suspend({}); // Pause workflow
5189
+ return { response: "" };
5190
+ }
5191
+ return { response: resumeData.userResponse };
5192
+ }
5193
+ });
5194
+ \`\`\`
5195
+
5196
+ **Resume Workflow:**
5197
+ \`\`\`typescript
5198
+ const result = await run.start({ inputData: {...} });
5199
+ if (result.status === "suspended") {
5200
+ await run.resume({
5201
+ step: result.suspended[0], // Or specific step ID
5202
+ resumeData: { userResponse: "answer" }
5203
+ });
5204
+ }
5205
+ \`\`\`
5206
+
5207
+ ### **\u{1F6E0}\uFE0F INTEGRATING AGENTS & TOOLS**
5208
+
5209
+ **Using Agents in Steps:**
5210
+ \`\`\`typescript
5211
+ // Method 1: Agent as step
5212
+ const agentStep = createStep(myAgent);
5213
+
5214
+ // Method 2: Call agent in execute function
5215
+ const step = createStep({
5216
+ execute: async ({ inputData }) => {
5217
+ const result = await myAgent.generate(prompt);
5218
+ return { output: result.text };
5219
+ }
5220
+ });
5221
+ \`\`\`
5222
+
5223
+ **Using Tools in Steps:**
5224
+ \`\`\`typescript
5225
+ // Method 1: Tool as step
5226
+ const toolStep = createStep(myTool);
5227
+
5228
+ // Method 2: Call tool in execute function
5229
+ const step = createStep({
5230
+ execute: async ({ inputData, runtimeContext }) => {
5231
+ const result = await myTool.execute({
5232
+ context: inputData,
5233
+ runtimeContext
5234
+ });
5235
+ return result;
5236
+ }
5237
+ });
5238
+ \`\`\`
5239
+
5240
+ ### **\u{1F5C2}\uFE0F PROJECT ORGANIZATION PATTERNS**
5241
+
5242
+ **MANDATORY Workflow Organization:**
5243
+ Each workflow MUST be organized in its own dedicated folder with separated concerns:
5244
+
5245
+ \`\`\`
5246
+ src/mastra/workflows/
5247
+ \u251C\u2500\u2500 my-workflow-name/ # Kebab-case folder name
5248
+ \u2502 \u251C\u2500\u2500 types.ts # All Zod schemas and TypeScript types
5249
+ \u2502 \u251C\u2500\u2500 steps.ts # All individual step definitions
5250
+ \u2502 \u251C\u2500\u2500 workflow.ts # Main workflow composition and export
5251
+ \u2502 \u2514\u2500\u2500 utils.ts # Helper functions (if needed)
5252
+ \u251C\u2500\u2500 another-workflow/
5253
+ \u2502 \u251C\u2500\u2500 types.ts
5254
+ \u2502 \u251C\u2500\u2500 steps.ts
5255
+ \u2502 \u251C\u2500\u2500 workflow.ts
5256
+ \u2502 \u2514\u2500\u2500 utils.ts
5257
+ \u2514\u2500\u2500 index.ts # Export all workflows
5258
+ \`\`\`
5259
+
5260
+ **CRITICAL File Organization Rules:**
5261
+ - **ALWAYS create a dedicated folder** for each workflow
5262
+ - **Folder names MUST be kebab-case** version of workflow name
5263
+ - **types.ts**: Define all input/output schemas, validation types, and interfaces
5264
+ - **steps.ts**: Create all individual step definitions using createStep()
5265
+ - **workflow.ts**: Compose steps into workflow using createWorkflow() and export the final workflow
5266
+ - **utils.ts**: Any helper functions, constants, or utilities (create only if needed)
5267
+ - **NEVER put everything in one file** - always separate concerns properly
5268
+
5269
+ **Workflow Registration:**
5270
+ \`\`\`typescript
5271
+ // src/mastra/index.ts
5272
+ export const mastra = new Mastra({
5273
+ workflows: {
5274
+ sendEmailWorkflow, // Use camelCase for keys
5275
+ dataProcessingWorkflow
5276
+ },
5277
+ storage: new LibSQLStore({ url: 'file:./mastra.db' }), // Required for suspend/resume
5278
+ });
5279
+ \`\`\`
5280
+
5281
+ ### **\u{1F4E6} ESSENTIAL DEPENDENCIES**
5282
+
5283
+ **Required Packages:**
5284
+ \`\`\`json
5285
+ {
5286
+ "dependencies": {
5287
+ "@mastra/core": "latest",
5288
+ "zod": "^3.25.67"
5289
+ }
5290
+ }
5291
+ \`\`\`
5292
+
5293
+ **Additional Packages (as needed):**
5294
+ - \`@mastra/libsql\`: For workflow state persistence
5295
+ - \`@ai-sdk/openai\`: For AI model integration
5296
+ - \`ai\`: For AI SDK functionality
5297
+
5298
+ ### **\u2705 WORKFLOW BEST PRACTICES**
5299
+
5300
+ **Schema Design:**
5301
+ - Use descriptive property names in schemas
5302
+ - Make schemas as specific as possible (avoid \`z.any()\`)
5303
+ - Include validation for required business logic
5304
+
5305
+ **Error Handling:**
5306
+ - Use \`try/catch\` blocks in step execute functions
5307
+ - Return meaningful error messages
5308
+ - Consider using \`bail()\` for early successful exits
5309
+
5310
+ **Step Organization:**
5311
+ - Keep steps focused on single responsibilities
5312
+ - Use descriptive step IDs (kebab-case recommended)
5313
+ - Create reusable steps for common operations
5314
+
5315
+ **Data Flow:**
5316
+ - Use \`.map()\` when schemas don't align between steps
5317
+ - Access previous step results with \`getStepResult(stepInstance)\`
5318
+ - Use \`getInitData()\` to access original workflow input
5319
+
5320
+ ### **\u{1F680} EXECUTION PATTERNS**
5321
+
5322
+ **Running Workflows:**
5323
+ \`\`\`typescript
5324
+ // Create and start run
5325
+ const run = await workflow.createRunAsync();
5326
+ const result = await run.start({ inputData: {...} });
5327
+
5328
+ // Stream execution for real-time monitoring
5329
+ const stream = await run.streamVNext({ inputData: {...} });
5330
+ for await (const chunk of stream) {
5331
+ console.log(chunk);
5332
+ }
5333
+
5334
+ // Watch for events
5335
+ run.watch((event) => console.log(event));
5336
+ \`\`\`
5337
+
5338
+ **Workflow Status Types:**
5339
+ - \`"success"\`: Completed successfully
5340
+ - \`"suspended"\`: Paused awaiting input
5341
+ - \`"failed"\`: Encountered error
5342
+
5343
+ ### **\u{1F517} ADVANCED FEATURES**
5344
+
5345
+ **Nested Workflows:**
5346
+ - Use workflows as steps: \`.then(otherWorkflow)\`
5347
+ - Enable complex workflow composition
5348
+
5349
+ **Runtime Context:**
5350
+ - Pass shared data across all steps
5351
+ - Enable dependency injection patterns
5352
+
5353
+ **Streaming & Events:**
5354
+ - Real-time workflow monitoring
5355
+ - Integration with external event systems
5356
+
5357
+ **Cloning:**
5358
+ - \`cloneWorkflow(original, {id: "new-id"})\`: Reuse workflow structure
5359
+ - \`cloneStep(original, {id: "new-id"})\`: Reuse step logic
5360
+
5361
+ This comprehensive research provides the foundation for creating robust, maintainable Mastra workflows with proper typing, error handling, and architectural patterns.
5362
+ `;
5363
+ var workflowBuilderPrompts = {
5364
+ researchAgent: {
5365
+ instructions: `You are a Mastra workflow research expert. Your task is to gather relevant information about creating Mastra workflows.
5366
+
5367
+ RESEARCH OBJECTIVES:
5368
+ 1. **Core Concepts**: Understand how Mastra workflows work
5369
+ 2. **Best Practices**: Learn workflow patterns and conventions
5370
+ 3. **Code Examples**: Find relevant implementation examples
5371
+ 4. **Technical Details**: Understand schemas, steps, and configuration
5372
+
5373
+ Use the available documentation and examples tools to gather comprehensive information about Mastra workflows.`,
5374
+ prompt: (context) => `Research everything about Mastra workflows to help create or edit them effectively.
5375
+
5376
+ PROJECT CONTEXT:
5377
+ - Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
5378
+ - Dependencies: ${JSON.stringify(context.dependencies, null, 2)}
5379
+ - Has Workflows Directory: ${context.hasWorkflowsDir}
5380
+
5381
+ Focus on:
5382
+ 1. How to create workflows using createWorkflow()
5383
+ 2. How to create and chain workflow steps
5384
+ 3. Best practices for workflow organization
5385
+ 4. Common workflow patterns and examples
5386
+ 5. Schema definitions and types
5387
+ 6. Error handling and debugging
5388
+
5389
+ Use the docs and examples tools to gather comprehensive information.`
5390
+ },
5391
+ executionAgent: {
5392
+ instructions: (context) => `You are executing a workflow ${context.action} task for: "${context.workflowName}"
5393
+
5394
+ CRITICAL WORKFLOW EXECUTION REQUIREMENTS:
5395
+ 1. **EXPLORE PROJECT STRUCTURE FIRST**: Use listDirectory and readFile tools to understand the existing project layout, folder structure, and conventions before creating any files
5396
+ 2. **FOLLOW PROJECT CONVENTIONS**: Look at existing workflows, agents, and file structures to understand where new files should be placed (typically src/mastra/workflows/, src/mastra/agents/, etc.)
5397
+ 3. **USE PRE-LOADED TASK LIST**: Your task list has been pre-populated in the taskManager tool. Use taskManager with action 'list' to see all tasks, and action 'update' to mark progress
5398
+ 4. **COMPLETE EVERY SINGLE TASK**: You MUST complete ALL ${context.tasksLength} tasks that are already in the taskManager. Do not stop until every task is marked as 'completed'
5399
+ 5. **Follow Task Dependencies**: Execute tasks in the correct order, respecting dependencies
5400
+ 6. **Request User Input When Needed**: If you encounter choices (like email providers, databases, etc.) that require user decision, return questions for clarification
5401
+ 7. **STRICT WORKFLOW ORGANIZATION**: When creating or editing workflows, you MUST follow this exact structure
5402
+
5403
+ MANDATORY WORKFLOW FOLDER STRUCTURE:
5404
+ When ${context.action === "create" ? "creating a new workflow" : "editing a workflow"}, you MUST organize files as follows:
5405
+
5406
+ \u{1F4C1} src/mastra/workflows/${context.workflowName?.toLowerCase().replace(/[^a-z0-9]/g, "-") || "new-workflow"}/
5407
+ \u251C\u2500\u2500 \u{1F4C4} types.ts # All Zod schemas and TypeScript types
5408
+ \u251C\u2500\u2500 \u{1F4C4} steps.ts # All individual step definitions
5409
+ \u251C\u2500\u2500 \u{1F4C4} workflow.ts # Main workflow composition and export
5410
+ \u2514\u2500\u2500 \u{1F4C4} utils.ts # Helper functions (if needed)
5411
+
5412
+ CRITICAL FILE ORGANIZATION RULES:
5413
+ - **ALWAYS create a dedicated folder** for the workflow in src/mastra/workflows/
5414
+ - **Folder name MUST be kebab-case** version of workflow name
5415
+ - **types.ts**: Define all input/output schemas, validation types, and interfaces
5416
+ - **steps.ts**: Create all individual step definitions using createStep()
5417
+ - **workflow.ts**: Compose steps into workflow using createWorkflow() and export the final workflow
5418
+ - **utils.ts**: Any helper functions, constants, or utilities (create only if needed)
5419
+ - **NEVER put everything in one file** - always separate concerns properly
5420
+
5421
+ CRITICAL COMPLETION REQUIREMENTS:
5422
+ - ALWAYS explore the directory structure before creating files to understand where they should go
5423
+ - You MUST complete ALL ${context.tasksLength} tasks before returning status='completed'
5424
+ - Use taskManager tool with action 'list' to see your current task list and action 'update' to mark tasks as 'in_progress' or 'completed'
5425
+ - If you need to make any decisions during implementation (choosing providers, configurations, etc.), return questions for user clarification
5426
+ - DO NOT make assumptions about file locations - explore first!
5427
+ - You cannot finish until ALL tasks in the taskManager are marked as 'completed'
5428
+
5429
+ PROJECT CONTEXT:
5430
+ - Action: ${context.action}
5431
+ - Workflow Name: ${context.workflowName}
5432
+ - Project Path: ${context.currentProjectPath}
5433
+ - Discovered Workflows: ${JSON.stringify(context.discoveredWorkflows, null, 2)}
5434
+ - Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
5435
+
5436
+ AVAILABLE RESEARCH:
5437
+ ${JSON.stringify(context.research, null, 2)}
5438
+
5439
+ PRE-LOADED TASK LIST (${context.tasksLength} tasks already in taskManager):
5440
+ ${context.tasks.map((task) => `- ${task.id}: ${task.content} (Priority: ${task.priority})`).join("\n")}
5441
+
5442
+ ${context.resumeData ? `USER PROVIDED ANSWERS: ${JSON.stringify(context.resumeData.answers, null, 2)}` : ""}
5443
+
5444
+ Start by exploring the project structure, then use 'taskManager' with action 'list' to see your pre-loaded tasks, and work through each task systematically.`,
5445
+ prompt: (context) => context.resumeData ? `Continue working on the task list. The user has provided answers to your questions: ${JSON.stringify(context.resumeData.answers, null, 2)}.
5446
+
5447
+ CRITICAL: You must complete ALL ${context.tasks.length} tasks that are pre-loaded in the taskManager. Use the taskManager tool with action 'list' to check your progress and continue with the next tasks. Do not stop until every single task is marked as 'completed'.` : `Begin executing the pre-loaded task list to ${context.action} the workflow "${context.workflowName}".
5448
+
5449
+ CRITICAL REQUIREMENTS:
5450
+ - Your ${context.tasks.length} tasks have been PRE-LOADED into the taskManager tool
5451
+ - Start by exploring the project directory structure using listDirectory and readFile tools to understand:
5452
+ - Where workflows are typically stored (look for src/mastra/workflows/ or similar)
5453
+ - What the existing file structure looks like
5454
+ - How other workflows are organized and named
5455
+ - Where agent files are stored if needed
5456
+ - Then use taskManager with action 'list' to see your pre-loaded tasks
5457
+ - Use taskManager with action 'update' to mark tasks as 'in_progress' or 'completed'
5458
+
5459
+ CRITICAL FILE ORGANIZATION RULES:
5460
+ - **ALWAYS create a dedicated folder** for the workflow in src/mastra/workflows/
5461
+ - **Folder name MUST be kebab-case** version of workflow name
5462
+ - **NEVER put everything in one file** - separate types, steps, and workflow composition
5463
+ - Follow the 4-file structure above for maximum maintainability and clarity
5464
+
5465
+ - DO NOT return status='completed' until ALL ${context.tasks.length} tasks are marked as 'completed' in the taskManager
5466
+
5467
+ PRE-LOADED TASKS (${context.tasks.length} total tasks in taskManager):
5468
+ ${context.tasks.map((task, index) => `${index + 1}. [${task.id}] ${task.content}`).join("\n")}
5469
+
5470
+ Use taskManager with action 'list' to see the current status of all tasks. You must complete every single one before finishing.`,
5471
+ iterationPrompt: (context) => `Continue working on the remaining tasks. You have already completed these tasks: [${context.completedTasks.map((t) => t.id).join(", ")}]
5472
+
5473
+ REMAINING TASKS TO COMPLETE (${context.pendingTasks.length} tasks):
5474
+ ${context.pendingTasks.map((task, index) => `${index + 1}. [${task.id}] ${task.content}`).join("\n")}
5475
+
5476
+ CRITICAL: You must complete ALL of these remaining ${context.pendingTasks.length} tasks. Use taskManager with action 'list' to check current status and action 'update' to mark tasks as completed.
5477
+
5478
+ ${context.resumeData ? `USER PROVIDED ANSWERS: ${JSON.stringify(context.resumeData.answers, null, 2)}` : ""}`
5479
+ },
5480
+ validation: {
5481
+ instructions: `CRITICAL VALIDATION INSTRUCTIONS:
5482
+ - When using the validateCode tool, ALWAYS pass the specific files you created or modified using the 'files' parameter
5483
+ - The tool uses a hybrid validation approach: fast syntax checking \u2192 semantic type checking \u2192 ESLint
5484
+ - This is much faster than full project compilation and only shows errors from your specific files
5485
+ - Example: validateCode({ validationType: ['types', 'lint'], files: ['src/workflows/my-workflow.ts', 'src/agents/my-agent.ts'] })
5486
+ - ALWAYS validate after creating or modifying files to ensure they compile correctly`
5487
+ }
5488
+ };
5489
+ var restrictedTaskManager = createTool$1({
5490
+ id: "task-manager",
5491
+ description: "View and update your pre-loaded task list. You can only mark tasks as in_progress or completed, not create new tasks.",
5492
+ inputSchema: z.object({
5493
+ action: z.enum(["list", "update", "complete"]).describe("List tasks, update status, or mark complete - tasks are pre-loaded"),
5494
+ tasks: z.array(
5495
+ z.object({
5496
+ id: z.string().describe("Task ID - must match existing task"),
5497
+ content: z.string().optional().describe("Task content (read-only)"),
5498
+ status: z.enum(["pending", "in_progress", "completed", "blocked"]).describe("Task status"),
5499
+ priority: z.enum(["high", "medium", "low"]).optional().describe("Task priority (read-only)"),
5500
+ dependencies: z.array(z.string()).optional().describe("Task dependencies (read-only)"),
5501
+ notes: z.string().optional().describe("Additional notes or progress updates")
5502
+ })
5503
+ ).optional().describe("Tasks to update (status and notes only)"),
5504
+ taskId: z.string().optional().describe("Specific task ID for single task operations")
5505
+ }),
5506
+ outputSchema: z.object({
5507
+ success: z.boolean(),
5508
+ tasks: z.array(
5509
+ z.object({
5510
+ id: z.string(),
5511
+ content: z.string(),
5512
+ status: z.string(),
5513
+ priority: z.string(),
5514
+ dependencies: z.array(z.string()).optional(),
5515
+ notes: z.string().optional(),
5516
+ createdAt: z.string(),
5517
+ updatedAt: z.string()
5518
+ })
5519
+ ),
5520
+ message: z.string()
5521
+ }),
5522
+ execute: async ({ context }) => {
5523
+ const adaptedContext = {
5524
+ ...context,
5525
+ action: context.action,
5526
+ tasks: context.tasks?.map((task) => ({
5527
+ ...task,
5528
+ priority: task.priority || "medium"
5529
+ }))
5530
+ };
5531
+ return await AgentBuilderDefaults.manageTaskList(adaptedContext);
5532
+ }
5533
+ });
5534
+
5535
+ // src/workflows/workflow-builder/workflow-builder.ts
5536
+ var workflowDiscoveryStep = createStep({
5537
+ id: "workflow-discovery",
5538
+ description: "Discover existing workflows in the project",
5539
+ inputSchema: WorkflowBuilderInputSchema,
5540
+ outputSchema: WorkflowDiscoveryResultSchema,
5541
+ execute: async ({ inputData, runtimeContext: _runtimeContext }) => {
5542
+ console.log("Starting workflow discovery...");
5543
+ const { projectPath = process.cwd() } = inputData;
5544
+ try {
5545
+ const workflowsPath = join(projectPath, "src/mastra/workflows");
5546
+ if (!existsSync(workflowsPath)) {
5547
+ console.log("No workflows directory found");
5548
+ return {
5549
+ success: true,
5550
+ workflows: [],
5551
+ mastraIndexExists: existsSync(join(projectPath, "src/mastra/index.ts")),
5552
+ message: "No existing workflows found in the project"
5553
+ };
5554
+ }
5555
+ const workflowFiles = await readdir(workflowsPath);
5556
+ const workflows = [];
5557
+ for (const fileName of workflowFiles) {
5558
+ if (fileName.endsWith(".ts") && !fileName.endsWith(".test.ts")) {
5559
+ const filePath = join(workflowsPath, fileName);
5560
+ try {
5561
+ const content = await readFile(filePath, "utf-8");
5562
+ const nameMatch = content.match(/createWorkflow\s*\(\s*{\s*id:\s*['"]([^'"]+)['"]/);
5563
+ const descMatch = content.match(/description:\s*['"]([^'"]*)['"]/);
5564
+ if (nameMatch && nameMatch[1]) {
5565
+ workflows.push({
5566
+ name: nameMatch[1],
5567
+ file: filePath,
5568
+ description: descMatch?.[1] ?? "No description available"
5569
+ });
5570
+ }
5571
+ } catch (error) {
5572
+ console.warn(`Failed to read workflow file ${filePath}:`, error);
5573
+ }
5574
+ }
5575
+ }
5576
+ console.log(`Discovered ${workflows.length} existing workflows`);
5577
+ return {
5578
+ success: true,
5579
+ workflows,
5580
+ mastraIndexExists: existsSync(join(projectPath, "src/mastra/index.ts")),
5581
+ message: workflows.length > 0 ? `Found ${workflows.length} existing workflow(s): ${workflows.map((w) => w.name).join(", ")}` : "No existing workflows found in the project"
5582
+ };
5583
+ } catch (error) {
5584
+ console.error("Workflow discovery failed:", error);
5585
+ return {
5586
+ success: false,
5587
+ workflows: [],
5588
+ mastraIndexExists: false,
5589
+ message: `Workflow discovery failed: ${error instanceof Error ? error.message : String(error)}`,
5590
+ error: error instanceof Error ? error.message : String(error)
5591
+ };
5592
+ }
5593
+ }
5594
+ });
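
For clarity, here are the two regexes used in the discovery step above run against a hypothetical workflow file body; the sample source and expected output are illustrative only.

```typescript
// Hypothetical workflow source; only the createWorkflow({ id, description }) call matters here.
const content = `
import { createWorkflow } from "@mastra/core/workflows";
export const demoWorkflow = createWorkflow({ id: "send-email", description: "Sends a templated email" });
`;

const nameMatch = content.match(/createWorkflow\s*\(\s*{\s*id:\s*['"]([^'"]+)['"]/);
const descMatch = content.match(/description:\s*['"]([^'"]*)['"]/);

console.log(nameMatch?.[1]); // "send-email"
console.log(descMatch?.[1]); // "Sends a templated email"
```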
5595
+ var projectDiscoveryStep = createStep({
5596
+ id: "project-discovery",
5597
+ description: "Analyze the project structure and setup",
5598
+ inputSchema: WorkflowDiscoveryResultSchema,
5599
+ outputSchema: ProjectDiscoveryResultSchema,
5600
+ execute: async ({ inputData: _inputData, runtimeContext: _runtimeContext }) => {
5601
+ console.log("Starting project discovery...");
5602
+ try {
5603
+ const projectPath = process.cwd();
5604
+ const projectStructure = {
5605
+ hasPackageJson: existsSync(join(projectPath, "package.json")),
5606
+ hasMastraConfig: existsSync(join(projectPath, "mastra.config.js")) || existsSync(join(projectPath, "mastra.config.ts")),
5607
+ hasSrcDirectory: existsSync(join(projectPath, "src")),
5608
+ hasMastraDirectory: existsSync(join(projectPath, "src/mastra")),
5609
+ hasWorkflowsDirectory: existsSync(join(projectPath, "src/mastra/workflows")),
5610
+ hasToolsDirectory: existsSync(join(projectPath, "src/mastra/tools")),
5611
+ hasAgentsDirectory: existsSync(join(projectPath, "src/mastra/agents"))
5612
+ };
5613
+ let packageInfo = null;
5614
+ if (projectStructure.hasPackageJson) {
5615
+ try {
5616
+ const packageContent = await readFile(join(projectPath, "package.json"), "utf-8");
5617
+ packageInfo = JSON.parse(packageContent);
5618
+ } catch (error) {
5619
+ console.warn("Failed to read package.json:", error);
5620
+ }
5621
+ }
5622
+ console.log("Project discovery completed");
5623
+ return {
5624
+ success: true,
5625
+ structure: {
5626
+ hasWorkflowsDir: projectStructure.hasWorkflowsDirectory,
5627
+ hasAgentsDir: projectStructure.hasAgentsDirectory,
5628
+ hasToolsDir: projectStructure.hasToolsDirectory,
5629
+ hasMastraIndex: existsSync(join(projectPath, "src/mastra/index.ts")),
5630
+ existingWorkflows: [],
5631
+ existingAgents: [],
5632
+ existingTools: []
5633
+ },
5634
+ dependencies: packageInfo?.dependencies || {},
5635
+ message: "Project discovery completed successfully"
5636
+ };
5637
+ } catch (error) {
5638
+ console.error("Project discovery failed:", error);
5639
+ return {
5640
+ success: false,
5641
+ structure: {
5642
+ hasWorkflowsDir: false,
5643
+ hasAgentsDir: false,
5644
+ hasToolsDir: false,
5645
+ hasMastraIndex: false,
5646
+ existingWorkflows: [],
5647
+ existingAgents: [],
5648
+ existingTools: []
5649
+ },
5650
+ dependencies: {},
5651
+ message: "Project discovery failed",
5652
+ error: error instanceof Error ? error.message : String(error)
5653
+ };
5654
+ }
5655
+ }
5656
+ });
5657
+var workflowResearchStep = createStep({
+  id: "workflow-research",
+  description: "Research Mastra workflows and gather relevant documentation",
+  inputSchema: ProjectDiscoveryResultSchema,
+  outputSchema: WorkflowResearchResultSchema,
+  execute: async ({ inputData, runtimeContext }) => {
+    console.log("Starting workflow research...");
+    try {
+      const researchAgent = new Agent({
+        model: resolveModel(runtimeContext),
+        instructions: workflowBuilderPrompts.researchAgent.instructions,
+        name: "Workflow Research Agent"
+        // tools: filteredMcpTools,
+      });
+      const researchPrompt = workflowBuilderPrompts.researchAgent.prompt({
+        projectStructure: inputData.structure,
+        dependencies: inputData.dependencies,
+        hasWorkflowsDir: inputData.structure.hasWorkflowsDir
+      });
+      const result = await researchAgent.generateVNext(researchPrompt, {
+        output: WorkflowResearchResultSchema
+        // stopWhen: stepCountIs(10),
+      });
+      const researchResult = await result.object;
+      if (!researchResult) {
+        return {
+          success: false,
+          documentation: {
+            workflowPatterns: [],
+            stepExamples: [],
+            bestPractices: []
+          },
+          webResources: [],
+          message: "Research agent failed to generate valid response",
+          error: "Research agent failed to generate valid response"
+        };
+      }
+      console.log("Research completed successfully");
+      return {
+        success: true,
+        documentation: {
+          workflowPatterns: researchResult.documentation.workflowPatterns,
+          stepExamples: researchResult.documentation.stepExamples,
+          bestPractices: researchResult.documentation.bestPractices
+        },
+        webResources: researchResult.webResources,
+        message: "Research completed successfully"
+      };
+    } catch (error) {
+      console.error("Workflow research failed:", error);
+      return {
+        success: false,
+        documentation: {
+          workflowPatterns: [],
+          stepExamples: [],
+          bestPractices: []
+        },
+        webResources: [],
+        message: "Research failed",
+        error: error instanceof Error ? error.message : String(error)
+      };
+    }
+  }
+});
+var taskExecutionStep = createStep({
+  id: "task-execution",
+  description: "Execute the approved task list to create or edit the workflow",
+  inputSchema: TaskExecutionInputSchema,
+  outputSchema: TaskExecutionResultSchema,
+  suspendSchema: TaskExecutionSuspendSchema,
+  resumeSchema: TaskExecutionResumeSchema,
+  execute: async ({ inputData, resumeData, suspend, runtimeContext }) => {
+    const {
+      action,
+      workflowName,
+      description: _description,
+      requirements: _requirements,
+      tasks,
+      discoveredWorkflows,
+      projectStructure,
+      research,
+      projectPath
+    } = inputData;
+    console.log(`Starting task execution for ${action}ing workflow: ${workflowName}`);
+    console.log(`Executing ${tasks.length} tasks using AgentBuilder stream...`);
+    try {
+      const currentProjectPath = projectPath || process.cwd();
+      console.log("Pre-populating taskManager with planned tasks...");
+      const taskManagerContext = {
+        action: "create",
+        tasks: tasks.map((task) => ({
+          id: task.id,
+          content: task.content,
+          status: "pending",
+          priority: task.priority,
+          dependencies: task.dependencies,
+          notes: task.notes
+        }))
+      };
+      const taskManagerResult = await AgentBuilderDefaults.manageTaskList(taskManagerContext);
+      console.log(`Task manager initialized with ${taskManagerResult.tasks.length} tasks`);
+      if (!taskManagerResult.success) {
+        throw new Error(`Failed to initialize task manager: ${taskManagerResult.message}`);
+      }
+      const executionAgent = new AgentBuilder({
+        projectPath: currentProjectPath,
+        model: resolveModel(runtimeContext),
+        tools: {
+          "task-manager": restrictedTaskManager
+        },
+        instructions: `${workflowBuilderPrompts.executionAgent.instructions({
+          action,
+          workflowName,
+          tasksLength: tasks.length,
+          currentProjectPath,
+          discoveredWorkflows,
+          projectStructure,
+          research,
+          tasks,
+          resumeData
+        })}
+
+${workflowBuilderPrompts.validation.instructions}`
+      });
+      const executionPrompt = workflowBuilderPrompts.executionAgent.prompt({
+        action,
+        workflowName,
+        tasks,
+        resumeData
+      });
+      const originalInstructions = await executionAgent.getInstructions({ runtimeContext });
+      const additionalInstructions = executionAgent.instructions;
+      let enhancedInstructions = originalInstructions;
+      if (additionalInstructions) {
+        enhancedInstructions = `${originalInstructions}
+
+${additionalInstructions}`;
+      }
+      const enhancedOptions = {
+        stopWhen: stepCountIs(100),
+        temperature: 0.3,
+        instructions: enhancedInstructions
+      };
+      let finalResult = null;
+      let allTasksCompleted = false;
+      let iterationCount = 0;
+      const maxIterations = 5;
+      const expectedTaskIds = tasks.map((task) => task.id);
+      while (!allTasksCompleted && iterationCount < maxIterations) {
+        iterationCount++;
+        const currentTaskStatus = await AgentBuilderDefaults.manageTaskList({ action: "list" });
+        const completedTasks = currentTaskStatus.tasks.filter((task) => task.status === "completed");
+        const pendingTasks = currentTaskStatus.tasks.filter((task) => task.status !== "completed");
+        console.log(`
+=== EXECUTION ITERATION ${iterationCount} ===`);
+        console.log(`Completed tasks: ${completedTasks.length}/${expectedTaskIds.length}`);
+        console.log(`Remaining tasks: ${pendingTasks.map((t) => t.id).join(", ")}`);
+        allTasksCompleted = pendingTasks.length === 0;
+        if (allTasksCompleted) {
+          console.log("All tasks completed! Breaking execution loop.");
+          break;
+        }
+        const iterationPrompt = iterationCount === 1 ? executionPrompt : `${workflowBuilderPrompts.executionAgent.iterationPrompt({
+          completedTasks,
+          pendingTasks,
+          workflowName,
+          resumeData
+        })}
+
+${workflowBuilderPrompts.validation.instructions}`;
+        const stream = await executionAgent.streamVNext(iterationPrompt, {
+          structuredOutput: {
+            schema: TaskExecutionIterationInputSchema(tasks.length),
+            model: resolveModel(runtimeContext)
+          },
+          ...enhancedOptions
+        });
+        let finalMessage = "";
+        for await (const chunk of stream.fullStream) {
+          if (chunk.type === "text-delta") {
+            finalMessage += chunk.payload.text;
+          }
+          if (chunk.type === "step-finish") {
+            console.log(finalMessage);
+            finalMessage = "";
+          }
+          if (chunk.type === "tool-result") {
+            console.log(JSON.stringify(chunk, null, 2));
+          }
+          if (chunk.type === "finish") {
+            console.log(chunk);
+          }
+        }
+        await stream.consumeStream();
+        finalResult = await stream.object;
+        console.log(`Iteration ${iterationCount} result:`, { finalResult });
+        if (!finalResult) {
+          throw new Error(`No result received from agent execution on iteration ${iterationCount}`);
+        }
+        const postIterationTaskStatus = await AgentBuilderDefaults.manageTaskList({ action: "list" });
+        const postCompletedTasks = postIterationTaskStatus.tasks.filter((task) => task.status === "completed");
+        const postPendingTasks = postIterationTaskStatus.tasks.filter((task) => task.status !== "completed");
+        allTasksCompleted = postPendingTasks.length === 0;
+        console.log(
+          `After iteration ${iterationCount}: ${postCompletedTasks.length}/${expectedTaskIds.length} tasks completed in taskManager`
+        );
+        if (finalResult.status === "needs_clarification" && finalResult.questions && finalResult.questions.length > 0) {
+          console.log(
+            `Agent needs clarification on iteration ${iterationCount}: ${finalResult.questions.length} questions`
+          );
+          break;
+        }
+        if (finalResult.status === "completed" && !allTasksCompleted) {
+          console.log(
+            `Agent claimed completion but taskManager shows pending tasks: ${postPendingTasks.map((t) => t.id).join(", ")}`
+          );
+        }
+      }
+      if (iterationCount >= maxIterations && !allTasksCompleted) {
+        finalResult.error = `Maximum iterations (${maxIterations}) reached but not all tasks completed`;
+        finalResult.status = "in_progress";
+      }
+      if (!finalResult) {
+        throw new Error("No result received from agent execution");
+      }
+      if (finalResult.status === "needs_clarification" && finalResult.questions && finalResult.questions.length > 0) {
+        console.log(`Agent needs clarification: ${finalResult.questions.length} questions`);
+        console.log("finalResult", JSON.stringify(finalResult, null, 2));
+        return suspend({
+          questions: finalResult.questions,
+          currentProgress: finalResult.progress,
+          completedTasks: finalResult.completedTasks || [],
+          message: finalResult.message
+        });
+      }
+      const finalTaskStatus = await AgentBuilderDefaults.manageTaskList({ action: "list" });
+      const finalCompletedTasks = finalTaskStatus.tasks.filter((task) => task.status === "completed");
+      const finalPendingTasks = finalTaskStatus.tasks.filter((task) => task.status !== "completed");
+      const tasksCompleted = finalCompletedTasks.length;
+      const tasksExpected = expectedTaskIds.length;
+      const finalAllTasksCompleted = finalPendingTasks.length === 0;
+      const success = finalAllTasksCompleted && !finalResult.error;
+      const message = success ? `Successfully completed workflow ${action} - all ${tasksExpected} tasks completed after ${iterationCount} iteration(s): ${finalResult.message}` : `Workflow execution finished with issues after ${iterationCount} iteration(s): ${finalResult.message}. Completed: ${tasksCompleted}/${tasksExpected} tasks`;
+      console.log(message);
+      const missingTasks = finalPendingTasks.map((task) => task.id);
+      const validationErrors = [];
+      if (finalResult.error) {
+        validationErrors.push(finalResult.error);
+      }
+      if (!finalAllTasksCompleted) {
+        validationErrors.push(
+          `Incomplete tasks: ${missingTasks.join(", ")} (${tasksCompleted}/${tasksExpected} completed)`
+        );
+      }
+      return {
+        success,
+        completedTasks: finalCompletedTasks.map((task) => task.id),
+        filesModified: finalResult.filesModified || [],
+        validationResults: {
+          passed: success,
+          errors: validationErrors,
+          warnings: finalAllTasksCompleted ? [] : [`Missing ${missingTasks.length} tasks: ${missingTasks.join(", ")}`]
+        },
+        message,
+        error: finalResult.error
+      };
+    } catch (error) {
+      console.error("Task execution failed:", error);
+      return {
+        success: false,
+        completedTasks: [],
+        filesModified: [],
+        validationResults: {
+          passed: false,
+          errors: [`Task execution failed: ${error instanceof Error ? error.message : String(error)}`],
+          warnings: []
+        },
+        message: `Task execution failed: ${error instanceof Error ? error.message : String(error)}`,
+        error: error instanceof Error ? error.message : String(error)
+      };
+    }
+  }
+});
+var workflowBuilderWorkflow = createWorkflow({
+  id: "workflow-builder",
+  description: "Create or edit Mastra workflows using AI-powered assistance with iterative planning",
+  inputSchema: WorkflowBuilderInputSchema,
+  outputSchema: WorkflowBuilderResultSchema,
+  steps: [
+    workflowDiscoveryStep,
+    projectDiscoveryStep,
+    workflowResearchStep,
+    planningAndApprovalWorkflow,
+    taskExecutionStep
+  ]
+}).then(workflowDiscoveryStep).then(projectDiscoveryStep).then(workflowResearchStep).map(async ({ getStepResult, getInitData }) => {
+  const initData = getInitData();
+  const discoveryResult = getStepResult(workflowDiscoveryStep);
+  const projectResult = getStepResult(projectDiscoveryStep);
+  return {
+    action: initData.action,
+    workflowName: initData.workflowName,
+    description: initData.description,
+    requirements: initData.requirements,
+    discoveredWorkflows: discoveryResult.workflows,
+    projectStructure: projectResult,
+    // research: researchResult,
+    research: workflowResearch,
+    userAnswers: void 0
+  };
+}).dountil(planningAndApprovalWorkflow, async ({ inputData }) => {
+  console.log(`Sub-workflow check: approved=${inputData.approved}`);
+  return inputData.approved === true;
+}).map(async ({ getStepResult, getInitData }) => {
+  const initData = getInitData();
+  const discoveryResult = getStepResult(workflowDiscoveryStep);
+  const projectResult = getStepResult(projectDiscoveryStep);
+  const subWorkflowResult = getStepResult(planningAndApprovalWorkflow);
+  return {
+    action: initData.action,
+    workflowName: initData.workflowName,
+    description: initData.description,
+    requirements: initData.requirements,
+    tasks: subWorkflowResult.tasks,
+    discoveredWorkflows: discoveryResult.workflows,
+    projectStructure: projectResult,
+    // research: researchResult,
+    research: workflowResearch,
+    projectPath: initData.projectPath || process.cwd()
+  };
+}).then(taskExecutionStep).commit();
+
+// src/workflows/workflow-map.ts
+var agentBuilderWorkflows = {
+  "merge-template": agentBuilderTemplateWorkflow,
+  "workflow-builder": workflowBuilderWorkflow
+};
 
-// src/index.ts
+// src/agent/index.ts
 var AgentBuilder = class extends Agent {
   builderConfig;
   /**
-   * Private constructor - use AgentBuilder.create() instead
+   * Constructor for AgentBuilder
    */
   constructor(config) {
     const additionalInstructions = config.instructions ? `## Priority Instructions
@@ -3662,9 +6014,7 @@ ${config.instructions}` : "";
           ...config.tools || {}
         };
       },
-      workflows: {
-        "merge-template": mergeTemplateWorkflow
-      },
+      workflows: agentBuilderWorkflows,
       memory: new Memory({
         options: AgentBuilderDefaults.DEFAULT_MEMORY_CONFIG,
         processors: [
@@ -3767,4 +6117,6 @@ ${!options?.outputFormat || options.outputFormat === "both" ? "Provide both expl
   }
 };
 
-export { AgentBuilder };
+export { AgentBuilder, AgentBuilderDefaults, agentBuilderTemplateWorkflow, agentBuilderWorkflows, mergeTemplateBySlug, planningAndApprovalWorkflow, workflowBuilderWorkflow };
+//# sourceMappingURL=index.js.map
+//# sourceMappingURL=index.js.map
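
For context on the expanded export surface above, here is a minimal usage sketch of the newly exported workflowBuilderWorkflow. It assumes the workflow run API of recent Mastra releases (createRunAsync() / start({ inputData })) and an input shape inferred from the fields this diff maps out of getInitData() (action, workflowName, description, requirements, projectPath); the exact WorkflowBuilderInputSchema is defined elsewhere in the package, so the field names and values below are illustrative, not confirmed by this diff.

import { workflowBuilderWorkflow } from "@mastra/agent-builder";

// Hypothetical invocation; the run-API method names are assumptions.
const run = await workflowBuilderWorkflow.createRunAsync();
const result = await run.start({
  inputData: {
    action: "create",                               // "create" or "edit", per the execution step's prompts
    workflowName: "daily-report",                   // illustrative workflow name
    description: "Summarize yesterday's metrics",   // illustrative description
    requirements: "Fetch metrics, summarize, email the summary",
    projectPath: process.cwd(),                     // the step itself falls back to process.cwd()
  },
});
console.log(result);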