@mastra/agent-builder 0.0.1-alpha.1 → 0.0.1-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/agent/index.d.ts +5885 -0
- package/dist/agent/index.d.ts.map +1 -0
- package/dist/defaults.d.ts +6529 -0
- package/dist/defaults.d.ts.map +1 -0
- package/dist/index.d.ts +4 -4
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +1810 -36
- package/dist/index.js.map +1 -0
- package/dist/processors/tool-summary.d.ts +29 -0
- package/dist/processors/tool-summary.d.ts.map +1 -0
- package/dist/processors/write-file.d.ts +10 -0
- package/dist/processors/write-file.d.ts.map +1 -0
- package/dist/types.d.ts +1121 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/utils.d.ts +63 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/workflows/index.d.ts +5 -0
- package/dist/workflows/index.d.ts.map +1 -0
- package/dist/workflows/shared/schema.d.ts +139 -0
- package/dist/workflows/shared/schema.d.ts.map +1 -0
- package/dist/workflows/task-planning/prompts.d.ts +37 -0
- package/dist/workflows/task-planning/prompts.d.ts.map +1 -0
- package/dist/workflows/task-planning/schema.d.ts +548 -0
- package/dist/workflows/task-planning/schema.d.ts.map +1 -0
- package/dist/workflows/task-planning/task-planning.d.ts +992 -0
- package/dist/workflows/task-planning/task-planning.d.ts.map +1 -0
- package/dist/workflows/template-builder/template-builder.d.ts +1910 -0
- package/dist/workflows/template-builder/template-builder.d.ts.map +1 -0
- package/dist/workflows/workflow-builder/prompts.d.ts +44 -0
- package/dist/workflows/workflow-builder/prompts.d.ts.map +1 -0
- package/dist/workflows/workflow-builder/schema.d.ts +1170 -0
- package/dist/workflows/workflow-builder/schema.d.ts.map +1 -0
- package/dist/workflows/workflow-builder/tools.d.ts +309 -0
- package/dist/workflows/workflow-builder/tools.d.ts.map +1 -0
- package/dist/workflows/workflow-builder/workflow-builder.d.ts +2714 -0
- package/dist/workflows/workflow-builder/workflow-builder.d.ts.map +1 -0
- package/dist/workflows/workflow-map.d.ts +3735 -0
- package/dist/workflows/workflow-map.d.ts.map +1 -0
- package/package.json +20 -9
- package/.turbo/turbo-build.log +0 -12
- package/dist/_tsup-dts-rollup.d.cts +0 -14933
- package/dist/_tsup-dts-rollup.d.ts +0 -14933
- package/dist/index.cjs +0 -4357
- package/dist/index.d.cts +0 -4
- package/eslint.config.js +0 -11
- package/integration-tests/CHANGELOG.md +0 -9
- package/integration-tests/README.md +0 -154
- package/integration-tests/docker-compose.yml +0 -39
- package/integration-tests/package.json +0 -38
- package/integration-tests/src/agent-template-behavior.test.ts +0 -103
- package/integration-tests/src/fixtures/minimal-mastra-project/env.example +0 -6
- package/integration-tests/src/fixtures/minimal-mastra-project/package.json +0 -17
- package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/agents/weather.ts +0 -34
- package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/index.ts +0 -15
- package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/mcp/index.ts +0 -46
- package/integration-tests/src/fixtures/minimal-mastra-project/src/mastra/tools/weather.ts +0 -14
- package/integration-tests/src/fixtures/minimal-mastra-project/tsconfig.json +0 -17
- package/integration-tests/src/template-integration.test.ts +0 -312
- package/integration-tests/tsconfig.json +0 -9
- package/integration-tests/vitest.config.ts +0 -18
- package/src/agent/index.ts +0 -187
- package/src/agent-builder.test.ts +0 -313
- package/src/defaults.ts +0 -2876
- package/src/index.ts +0 -3
- package/src/processors/tool-summary.ts +0 -145
- package/src/processors/write-file.ts +0 -17
- package/src/types.ts +0 -305
- package/src/utils.ts +0 -409
- package/src/workflows/index.ts +0 -1
- package/src/workflows/template-builder.ts +0 -1682
- package/tsconfig.json +0 -5
- package/vitest.config.ts +0 -11
package/dist/index.js
CHANGED
@@ -2,18 +2,20 @@ import { Agent } from '@mastra/core/agent';
  import { Memory } from '@mastra/memory';
  import { TokenLimiter } from '@mastra/memory/processors';
  import { exec as exec$1, execFile as execFile$1, spawn as spawn$1 } from 'child_process';
- import { mkdtemp, rm, readFile, writeFile, mkdir, copyFile, stat
- import { join, resolve,
+ import { mkdtemp, rm, readFile, writeFile, readdir, mkdir, copyFile, stat } from 'fs/promises';
+ import { join, resolve, basename, extname, dirname, isAbsolute, relative } from 'path';
  import { createTool } from '@mastra/core/tools';
  import ignore from 'ignore';
  import { z } from 'zod';
  import { existsSync, readFileSync } from 'fs';
  import { createRequire } from 'module';
  import { promisify } from 'util';
- import {
+ import { openai as openai$1 } from '@ai-sdk/openai_v5';
+ import { createStep as createStep$1, Agent as Agent$1, createWorkflow as createWorkflow$1, createTool as createTool$1, MemoryProcessor } from '@mastra/core';
  import { tmpdir } from 'os';
  import { openai } from '@ai-sdk/openai';
  import { createStep, createWorkflow } from '@mastra/core/workflows';
+ import { stepCountIs } from 'ai';

  // src/agent/index.ts
  var UNIT_KINDS = ["mcp-server", "tool", "workflow", "agent", "integration", "network", "other"];
@@ -491,6 +493,42 @@ async function renameAndCopyFile(sourceFile, targetFile) {
    console.log(`\u{1F4DD} Copied with unique name: ${basename(uniqueTargetFile)}`);
    return uniqueTargetFile;
  }
+ var resolveModel = (runtimeContext) => {
+   const modelFromContext = runtimeContext.get("model");
+   if (modelFromContext) {
+     console.log(`Using model: ${modelFromContext}`);
+     if (isValidMastraLanguageModel(modelFromContext)) {
+       return modelFromContext;
+     }
+     throw new Error(
+       'Invalid model provided. Model must be a MastraLanguageModel instance (e.g., openai("gpt-4"), anthropic("claude-3-5-sonnet"), etc.)'
+     );
+   }
+   return openai$1("gpt-4.1");
+ };
+ var isValidMastraLanguageModel = (model) => {
+   return model && typeof model === "object" && typeof model.modelId === "string" && typeof model.generate === "function";
+ };
+ var resolveTargetPath = (inputData, runtimeContext) => {
+   if (inputData.targetPath) {
+     return inputData.targetPath;
+   }
+   const contextPath = runtimeContext.get("targetPath");
+   if (contextPath) {
+     return contextPath;
+   }
+   const envRoot = process.env.MASTRA_PROJECT_ROOT?.trim();
+   if (envRoot) {
+     return envRoot;
+   }
+   const cwd = process.cwd();
+   const parent = dirname(cwd);
+   const grand = dirname(parent);
+   if (basename(cwd) === "output" && basename(parent) === ".mastra") {
+     return grand;
+   }
+   return cwd;
+ };

  // src/defaults.ts
  var AgentBuilderDefaults = class _AgentBuilderDefaults {
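The new `resolveModel` and `resolveTargetPath` helpers read overrides from the step's runtime context before falling back to `openai$1("gpt-4.1")`, `MASTRA_PROJECT_ROOT`, or the `.mastra/output` cwd heuristic. A minimal caller-side sketch of how those overrides would be supplied, assuming `@mastra/core`'s `RuntimeContext` API (the import path and keys here are illustrative, not part of the diff):

```ts
import { openai } from '@ai-sdk/openai';
import { RuntimeContext } from '@mastra/core/runtime-context';

// Hypothetical setup: values the workflow steps would pick up via resolveModel/resolveTargetPath.
const runtimeContext = new RuntimeContext();
runtimeContext.set('model', openai('gpt-4o'));          // must look like a MastraLanguageModel (modelId + generate)
runtimeContext.set('targetPath', '/path/to/my-project'); // otherwise env/cwd heuristics apply
```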
@@ -944,7 +982,7 @@ export const mastra = new Mastra({
        path: z.string().describe("Directory path to list"),
        recursive: z.boolean().default(false).describe("List subdirectories recursively"),
        includeHidden: z.boolean().default(false).describe("Include hidden files and directories"),
-       pattern: z.string().
+       pattern: z.string().default("*").describe("Glob pattern to filter files"),
        maxDepth: z.number().default(10).describe("Maximum recursion depth"),
        includeMetadata: z.boolean().default(true).describe("Include file metadata")
      }),
@@ -1072,12 +1110,16 @@ export const mastra = new Mastra({
      }),
      replaceLines: createTool({
        id: "replace-lines",
-       description: "Replace specific line ranges in files with new content.
+       description: "Replace specific line ranges in files with new content. IMPORTANT: This tool replaces ENTIRE lines, not partial content within lines. Lines are 1-indexed.",
        inputSchema: z.object({
          filePath: z.string().describe("Path to the file to edit"),
-         startLine: z.number().describe("Starting line number to replace (1-indexed)"),
-         endLine: z.number().describe(
-
+         startLine: z.number().describe("Starting line number to replace (1-indexed, inclusive). Count from the first line = 1"),
+         endLine: z.number().describe(
+           "Ending line number to replace (1-indexed, inclusive). To replace single line, use same number as startLine"
+         ),
+         newContent: z.string().describe(
+           'New content to replace the lines with. Use empty string "" to delete lines completely. For multiline content, include \n characters'
+         ),
          createBackup: z.boolean().default(false).describe("Create backup file before editing")
        }),
        outputSchema: z.object({
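The sharpened descriptions above spell out the tool's contract: whole lines only, 1-indexed, inclusive range, and an empty string deletes the range. A hedged sketch of two invocations illustrating those semantics (argument shape only; the `execute` wiring matches the `({ context })` signature shown in this file, but the file paths are hypothetical):

```ts
// Replace line 12 only (startLine === endLine), keeping a backup.
await replaceLines.execute({
  context: {
    filePath: 'src/mastra/index.ts',
    startLine: 12,
    endLine: 12,
    newContent: "import { weatherTool } from './tools/weather';",
    createBackup: true,
  },
});

// Delete lines 20-22 entirely by passing an empty string as newContent.
await replaceLines.execute({
  context: { filePath: 'src/mastra/index.ts', startLine: 20, endLine: 22, newContent: '', createBackup: false },
});
```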
@@ -1091,6 +1133,35 @@ export const mastra = new Mastra({
          return await _AgentBuilderDefaults.replaceLines({ ...context, projectPath });
        }
      }),
+     // File diagnostics tool to help debug line replacement issues
+     showFileLines: createTool({
+       id: "show-file-lines",
+       description: "Show specific lines from a file with line numbers. Useful for debugging before using replaceLines.",
+       inputSchema: z.object({
+         filePath: z.string().describe("Path to the file to examine"),
+         startLine: z.number().optional().describe("Starting line number to show (1-indexed). If not provided, shows all lines"),
+         endLine: z.number().optional().describe(
+           "Ending line number to show (1-indexed, inclusive). If not provided but startLine is, shows only that line"
+         ),
+         context: z.number().default(2).describe("Number of context lines to show before and after the range")
+       }),
+       outputSchema: z.object({
+         success: z.boolean(),
+         lines: z.array(
+           z.object({
+             lineNumber: z.number(),
+             content: z.string(),
+             isTarget: z.boolean().describe("Whether this line is in the target range")
+           })
+         ),
+         totalLines: z.number(),
+         message: z.string(),
+         error: z.string().optional()
+       }),
+       execute: async ({ context }) => {
+         return await _AgentBuilderDefaults.showFileLines({ ...context, projectPath });
+       }
+     }),
      // Interactive Communication
      askClarification: createTool({
        id: "ask-clarification",
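The new `show-file-lines` tool is intended as a pre-flight check before `replace-lines`. A hedged sketch of that debug loop, using the output shape defined above (the target file and range are hypothetical):

```ts
// Inspect lines 40-45 with 2 lines of surrounding context before editing.
const view = await showFileLines.execute({
  context: { filePath: 'src/workflows/my-workflow.ts', startLine: 40, endLine: 45, context: 2 },
});
if (view.success) {
  for (const line of view.lines) {
    // Lines inside the requested range are flagged with isTarget.
    console.log(`${line.isTarget ? '>' : ' '} ${line.lineNumber}: ${line.content}`);
  }
}
```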
@@ -1168,11 +1239,13 @@ export const mastra = new Mastra({
      }),
      validateCode: createTool({
        id: "validate-code",
-       description: "Validates
+       description: "Validates code using a fast hybrid approach: syntax \u2192 semantic \u2192 lint. RECOMMENDED: Always provide specific files for optimal performance and accuracy.",
        inputSchema: z.object({
          projectPath: z.string().optional().describe("Path to the project to validate (defaults to current project)"),
-         validationType: z.array(z.enum(["types", "lint", "schemas", "tests", "build"])).describe(
-         files: z.array(z.string()).optional().describe(
+         validationType: z.array(z.enum(["types", "lint", "schemas", "tests", "build"])).describe('Types of validation to perform. Recommended: ["types", "lint"] for code quality'),
+         files: z.array(z.string()).optional().describe(
+           "RECOMMENDED: Specific files to validate (e.g., files you created/modified). Uses hybrid validation: fast syntax check \u2192 semantic types \u2192 ESLint. Without files, falls back to slower CLI validation."
+         )
        }),
        outputSchema: z.object({
          valid: z.boolean(),
@@ -1863,13 +1936,99 @@ export const mastra = new Mastra({
      }
    }
  }
+ // Cache for TypeScript program (lazily loaded)
+ static tsProgram = null;
+ static programProjectPath = null;
  /**
-  * Validate code using
+  * Validate code using hybrid approach: syntax -> types -> lint
+  *
+  * BEST PRACTICES FOR CODING AGENTS:
+  *
+  * ✅ RECOMMENDED (Fast & Accurate):
+  * validateCode({
+  *   validationType: ['types', 'lint'],
+  *   files: ['src/workflows/my-workflow.ts', 'src/components/Button.tsx']
+  * })
+  *
+  * Performance: ~150ms
+  * - Syntax check (1ms) - catches 80% of issues instantly
+  * - Semantic validation (100ms) - full type checking with dependencies
+  * - ESLint (50ms) - style and best practices
+  * - Only shows errors from YOUR files
+  *
+  * ❌ AVOID (Slow & Noisy):
+  * validateCode({ validationType: ['types', 'lint'] }) // no files specified
+  *
+  * Performance: ~2000ms+
+  * - Full project CLI validation
+  * - Shows errors from all project files (confusing)
+  * - Much slower for coding agents
+  *
+  * @param projectPath - Project root directory (defaults to cwd)
+  * @param validationType - ['types', 'lint'] recommended for most use cases
+  * @param files - ALWAYS provide this for best performance
   */
  static async validateCode({
    projectPath,
    validationType,
    files
+ }) {
+   const errors = [];
+   const validationsPassed = [];
+   const validationsFailed = [];
+   const targetProjectPath = projectPath || process.cwd();
+   if (!files || files.length === 0) {
+     return this.validateCodeCLI({ projectPath, validationType });
+   }
+   for (const filePath of files) {
+     const absolutePath = isAbsolute(filePath) ? filePath : resolve(targetProjectPath, filePath);
+     try {
+       const fileContent = await readFile(absolutePath, "utf-8");
+       const fileResults = await this.validateSingleFileHybrid(
+         absolutePath,
+         fileContent,
+         targetProjectPath,
+         validationType
+       );
+       errors.push(...fileResults.errors);
+       for (const type of validationType) {
+         const hasErrors = fileResults.errors.some((e) => e.type === type && e.severity === "error");
+         if (hasErrors) {
+           if (!validationsFailed.includes(type)) validationsFailed.push(type);
+         } else {
+           if (!validationsPassed.includes(type)) validationsPassed.push(type);
+         }
+       }
+     } catch (error) {
+       errors.push({
+         type: "typescript",
+         severity: "error",
+         message: `Failed to read file ${filePath}: ${error instanceof Error ? error.message : String(error)}`,
+         file: filePath
+       });
+       validationsFailed.push("types");
+     }
+   }
+   const totalErrors = errors.filter((e) => e.severity === "error").length;
+   const totalWarnings = errors.filter((e) => e.severity === "warning").length;
+   const isValid = totalErrors === 0;
+   return {
+     valid: isValid,
+     errors,
+     summary: {
+       totalErrors,
+       totalWarnings,
+       validationsPassed,
+       validationsFailed
+     }
+   };
+ }
+ /**
+  * CLI-based validation for when no specific files are provided
+  */
+ static async validateCodeCLI({
+   projectPath,
+   validationType
  }) {
    const errors = [];
    const validationsPassed = [];
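Consistent with the JSDoc above, here is a short sketch of calling the hybrid path and reading the summary it returns (field names are taken from the return object in this hunk; the project path and file name are placeholders):

```ts
const result = await AgentBuilderDefaults.validateCode({
  projectPath: '/path/to/project',
  validationType: ['types', 'lint'],
  files: ['src/workflows/my-workflow.ts'], // omitting files falls back to the slower validateCodeCLI path
});

if (!result.valid) {
  // summary.validationsFailed lists which validation types produced errors.
  console.error(`${result.summary.totalErrors} error(s), failed: ${result.summary.validationsFailed.join(', ')}`);
  console.error(result.errors);
}
```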
@@ -1877,8 +2036,7 @@ export const mastra = new Mastra({
    const execOptions = { cwd: projectPath };
    if (validationType.includes("types")) {
      try {
-       const
-       const args = ["tsc", "--noEmit", ...fileArgs];
+       const args = ["tsc", "--noEmit"];
        await execFile("npx", args, execOptions);
        validationsPassed.push("types");
      } catch (error) {
@@ -1900,8 +2058,7 @@ export const mastra = new Mastra({
    }
    if (validationType.includes("lint")) {
      try {
-       const
-       const eslintArgs = ["eslint", ...fileArgs, "--format", "json"];
+       const eslintArgs = ["eslint", "--format", "json"];
        const { stdout } = await execFile("npx", eslintArgs, execOptions);
        if (stdout) {
          const eslintResults = JSON.parse(stdout);
@@ -1950,6 +2107,187 @@ export const mastra = new Mastra({
      }
    };
  }
+ /**
+  * Hybrid validation for a single file
+  */
+ static async validateSingleFileHybrid(filePath, fileContent, projectPath, validationType) {
+   const errors = [];
+   if (validationType.includes("types")) {
+     const syntaxErrors = await this.validateSyntaxOnly(fileContent, filePath);
+     errors.push(...syntaxErrors);
+     if (syntaxErrors.length > 0) {
+       return { errors };
+     }
+     const typeErrors = await this.validateTypesSemantic(filePath, projectPath);
+     errors.push(...typeErrors);
+   }
+   if (validationType.includes("lint") && !errors.some((e) => e.severity === "error")) {
+     const lintErrors = await this.validateESLintSingle(filePath, projectPath);
+     errors.push(...lintErrors);
+   }
+   return { errors };
+ }
+ /**
+  * Fast syntax-only validation using TypeScript parser
+  */
+ static async validateSyntaxOnly(fileContent, fileName) {
+   const errors = [];
+   try {
+     const ts = await import('typescript');
+     const sourceFile = ts.createSourceFile(fileName, fileContent, ts.ScriptTarget.Latest, true);
+     const options = {
+       allowJs: true,
+       checkJs: false,
+       noEmit: true
+     };
+     const host = {
+       getSourceFile: (name) => name === fileName ? sourceFile : void 0,
+       writeFile: () => {
+       },
+       getCurrentDirectory: () => "",
+       getDirectories: () => [],
+       fileExists: (name) => name === fileName,
+       readFile: (name) => name === fileName ? fileContent : void 0,
+       getCanonicalFileName: (name) => name,
+       useCaseSensitiveFileNames: () => true,
+       getNewLine: () => "\n",
+       getDefaultLibFileName: () => "lib.d.ts"
+     };
+     const program = ts.createProgram([fileName], options, host);
+     const diagnostics = program.getSyntacticDiagnostics(sourceFile);
+     for (const diagnostic of diagnostics) {
+       if (diagnostic.start !== void 0) {
+         const position = sourceFile.getLineAndCharacterOfPosition(diagnostic.start);
+         errors.push({
+           type: "typescript",
+           severity: "error",
+           message: ts.flattenDiagnosticMessageText(diagnostic.messageText, "\n"),
+           file: fileName,
+           line: position.line + 1,
+           column: position.character + 1
+         });
+       }
+     }
+   } catch (error) {
+     console.warn("TypeScript not available for syntax validation:", error);
+     const lines = fileContent.split("\n");
+     const commonErrors = [
+       { pattern: /\bimport\s+.*\s+from\s+['""][^'"]*$/, message: "Unterminated import statement" },
+       { pattern: /\{[^}]*$/, message: "Unclosed brace" },
+       { pattern: /\([^)]*$/, message: "Unclosed parenthesis" },
+       { pattern: /\[[^\]]*$/, message: "Unclosed bracket" }
+     ];
+     lines.forEach((line, index) => {
+       commonErrors.forEach(({ pattern, message }) => {
+         if (pattern.test(line)) {
+           errors.push({
+             type: "typescript",
+             severity: "error",
+             message,
+             file: fileName,
+             line: index + 1
+           });
+         }
+       });
+     });
+   }
+   return errors;
+ }
+ /**
+  * TypeScript semantic validation using incremental program
+  */
+ static async validateTypesSemantic(filePath, projectPath) {
+   const errors = [];
+   try {
+     const program = await this.getOrCreateTSProgram(projectPath);
+     if (!program) {
+       return errors;
+     }
+     const sourceFile = program.getSourceFile(filePath);
+     if (!sourceFile) {
+       return errors;
+     }
+     const diagnostics = [
+       ...program.getSemanticDiagnostics(sourceFile),
+       ...program.getSyntacticDiagnostics(sourceFile)
+     ];
+     const ts = await import('typescript');
+     for (const diagnostic of diagnostics) {
+       if (diagnostic.start !== void 0) {
+         const position = sourceFile.getLineAndCharacterOfPosition(diagnostic.start);
+         errors.push({
+           type: "typescript",
+           severity: diagnostic.category === ts.DiagnosticCategory.Warning ? "warning" : "error",
+           message: ts.flattenDiagnosticMessageText(diagnostic.messageText, "\n"),
+           file: filePath,
+           line: position.line + 1,
+           column: position.character + 1
+         });
+       }
+     }
+   } catch (error) {
+     console.warn(`TypeScript semantic validation failed for ${filePath}:`, error);
+   }
+   return errors;
+ }
+ /**
+  * ESLint validation for a single file
+  */
+ static async validateESLintSingle(filePath, projectPath) {
+   const errors = [];
+   try {
+     const { stdout } = await execFile("npx", ["eslint", filePath, "--format", "json"], { cwd: projectPath });
+     if (stdout) {
+       const eslintResults = JSON.parse(stdout);
+       const eslintErrors = this.parseESLintErrors(eslintResults);
+       errors.push(...eslintErrors);
+     }
+   } catch (error) {
+     const errorMessage = error instanceof Error ? error.message : String(error);
+     if (errorMessage.includes('"filePath"') || errorMessage.includes("messages")) {
+       try {
+         const eslintResults = JSON.parse(errorMessage);
+         const eslintErrors = this.parseESLintErrors(eslintResults);
+         errors.push(...eslintErrors);
+       } catch {
+       }
+     }
+   }
+   return errors;
+ }
+ /**
+  * Get or create TypeScript program
+  */
+ static async getOrCreateTSProgram(projectPath) {
+   if (this.tsProgram && this.programProjectPath === projectPath) {
+     return this.tsProgram;
+   }
+   try {
+     const ts = await import('typescript');
+     const configPath = ts.findConfigFile(projectPath, ts.sys.fileExists, "tsconfig.json");
+     if (!configPath) {
+       return null;
+     }
+     const configFile = ts.readConfigFile(configPath, ts.sys.readFile);
+     if (configFile.error) {
+       return null;
+     }
+     const parsedConfig = ts.parseJsonConfigFileContent(configFile.config, ts.sys, projectPath);
+     if (parsedConfig.errors.length > 0) {
+       return null;
+     }
+     this.tsProgram = ts.createProgram({
+       rootNames: parsedConfig.fileNames,
+       options: parsedConfig.options
+     });
+     this.programProjectPath = projectPath;
+     return this.tsProgram;
+   } catch (error) {
+     console.warn("Failed to create TypeScript program:", error);
+     return null;
+   }
+ }
+ // Note: Old filterTypeScriptErrors method removed in favor of hybrid validation approach
  /**
   * Parse ESLint errors from JSON output
   */
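One design note on the helpers above: `getOrCreateTSProgram` caches a single `ts.Program` keyed by project path, so repeated single-file validations amortize the cost of reading `tsconfig.json` and building the project graph. A hedged sketch of the fast syntax-only stage in isolation (these are internal statics of the bundled class, so calling them directly is for illustration only):

```ts
// Returns [] for well-formed source; a syntax error yields an entry with a
// 1-indexed line/column, as produced by validateSyntaxOnly above.
const errors = await AgentBuilderDefaults.validateSyntaxOnly(
  'const x = { broken: true', // deliberately unclosed brace
  'example.ts',
);
console.log(errors[0]?.message, errors[0]?.line);
```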
@@ -2364,10 +2702,17 @@ export const mastra = new Mastra({
      const fullPath = isAbsolute(filePath) ? filePath : join(projectPath, filePath);
      const content = await readFile(fullPath, "utf-8");
      const lines = content.split("\n");
-     if (startLine < 1 || endLine < 1
+     if (startLine < 1 || endLine < 1) {
        return {
          success: false,
-         message: `
+         message: `Line numbers must be 1 or greater. Got startLine: ${startLine}, endLine: ${endLine}`,
+         error: "Invalid line range"
+       };
+     }
+     if (startLine > lines.length || endLine > lines.length) {
+       return {
+         success: false,
+         message: `Line range ${startLine}-${endLine} is out of bounds. File has ${lines.length} lines. Remember: lines are 1-indexed, so valid range is 1-${lines.length}.`,
          error: "Invalid line range"
        };
      }
@@ -2391,9 +2736,10 @@ export const mastra = new Mastra({
      const updatedContent = updatedLines.join("\n");
      await writeFile(fullPath, updatedContent, "utf-8");
      const linesReplaced = endLine - startLine + 1;
+     const newLineCount = newLines.length;
      return {
        success: true,
-       message: `Successfully replaced ${linesReplaced} lines (${startLine}-${endLine}) in ${filePath}`,
+       message: `Successfully replaced ${linesReplaced} lines (${startLine}-${endLine}) with ${newLineCount} new lines in ${filePath}`,
        linesReplaced,
        backup
      };
@@ -2405,6 +2751,51 @@ export const mastra = new Mastra({
      };
    }
  }
+ /**
+  * Show file lines with line numbers for debugging
+  */
+ static async showFileLines(context) {
+   const { filePath, startLine, endLine, context: contextLines = 2, projectPath = process.cwd() } = context;
+   try {
+     const fullPath = isAbsolute(filePath) ? filePath : join(projectPath, filePath);
+     const content = await readFile(fullPath, "utf-8");
+     const lines = content.split("\n");
+     let targetStart = startLine;
+     let targetEnd = endLine;
+     if (!targetStart) {
+       targetStart = 1;
+       targetEnd = lines.length;
+     } else if (!targetEnd) {
+       targetEnd = targetStart;
+     }
+     const displayStart = Math.max(1, targetStart - contextLines);
+     const displayEnd = Math.min(lines.length, targetEnd + contextLines);
+     const result = [];
+     for (let i = displayStart; i <= displayEnd; i++) {
+       const lineIndex = i - 1;
+       const isTarget = i >= targetStart && i <= targetEnd;
+       result.push({
+         lineNumber: i,
+         content: lineIndex < lines.length ? lines[lineIndex] ?? "" : "",
+         isTarget
+       });
+     }
+     return {
+       success: true,
+       lines: result,
+       totalLines: lines.length,
+       message: `Showing lines ${displayStart}-${displayEnd} of ${lines.length} total lines in ${filePath}`
+     };
+   } catch (error) {
+     return {
+       success: false,
+       lines: [],
+       totalLines: 0,
+       message: `Failed to read file: ${error instanceof Error ? error.message : String(error)}`,
+       error: error instanceof Error ? error.message : String(error)
+     };
+   }
+ }
  /**
   * Ask user for clarification
   */
@@ -2900,10 +3291,10 @@ var WriteToDiskProcessor = class extends MemoryProcessor {
      return messages;
    }
  };
- var
+ var resolveModel2 = (runtimeContext) => {
    const modelFromContext = runtimeContext.get("model");
    if (modelFromContext) {
-     if (
+     if (isValidMastraLanguageModel2(modelFromContext)) {
        return modelFromContext;
      }
      throw new Error(
@@ -2912,7 +3303,7 @@ var resolveModel = (runtimeContext) => {
    }
    return openai("gpt-4.1");
  };
- var
+ var isValidMastraLanguageModel2 = (model) => {
    return model && typeof model === "object" && typeof model.modelId === "string" && typeof model.generate === "function";
  };
  var cloneTemplateStep = createStep({
@@ -3003,7 +3394,7 @@ var discoverUnitsStep = createStep({
    const tools = await AgentBuilderDefaults.DEFAULT_TOOLS(templateDir);
    try {
      const agent = new Agent({
-       model:
+       model: resolveModel2(runtimeContext),
        instructions: `You are an expert at analyzing Mastra projects.

  Your task is to scan the provided directory and identify all available units (agents, workflows, tools, MCP servers, networks).
@@ -3135,7 +3526,7 @@ var prepareBranchStep = createStep({
    inputSchema: PrepareBranchInputSchema,
    outputSchema: PrepareBranchResultSchema,
    execute: async ({ inputData, runtimeContext }) => {
-     const targetPath = inputData
+     const targetPath = resolveTargetPath(inputData, runtimeContext);
      try {
        const branchName = `feat/install-template-${inputData.slug}`;
        await gitCheckoutBranch(branchName, targetPath);
@@ -3162,7 +3553,7 @@ var packageMergeStep = createStep({
    execute: async ({ inputData, runtimeContext }) => {
      console.log("Package merge step starting...");
      const { slug, packageInfo } = inputData;
-     const targetPath = inputData
+     const targetPath = resolveTargetPath(inputData, runtimeContext);
      try {
        const targetPkgPath = join(targetPath, "package.json");
        let targetPkgRaw = "{}";
@@ -3238,7 +3629,7 @@ var installStep = createStep({
    outputSchema: InstallResultSchema,
    execute: async ({ inputData, runtimeContext }) => {
      console.log("Running install step...");
-     const targetPath = inputData
+     const targetPath = resolveTargetPath(inputData, runtimeContext);
      try {
        await spawnSWPM(targetPath, "install", []);
        const lock = ["pnpm-lock.yaml", "package-lock.json", "yarn.lock"].map((f) => join(targetPath, f)).find((f) => existsSync(f));
@@ -3267,7 +3658,7 @@ var programmaticFileCopyStep = createStep({
    execute: async ({ inputData, runtimeContext }) => {
      console.log("Programmatic file copy step starting...");
      const { orderedUnits, templateDir, commitSha, slug } = inputData;
-     const targetPath = inputData
+     const targetPath = resolveTargetPath(inputData, runtimeContext);
      try {
        const copiedFiles = [];
        const conflicts = [];
@@ -3346,7 +3737,7 @@ var programmaticFileCopyStep = createStep({
        const convertedFileName = namingConvention !== "unknown" ? convertNaming(baseId + fileExtension, namingConvention) : baseId + fileExtension;
        const targetFile = resolve(targetPath, targetDir, convertedFileName);
        if (existsSync(targetFile)) {
-         const strategy = determineConflictStrategy();
+         const strategy = determineConflictStrategy(unit, targetFile);
          console.log(`File exists: ${convertedFileName}, using strategy: ${strategy}`);
          switch (strategy) {
            case "skip":
@@ -3537,7 +3928,7 @@ var intelligentMergeStep = createStep({
    execute: async ({ inputData, runtimeContext }) => {
      console.log("Intelligent merge step starting...");
      const { conflicts, copiedFiles, commitSha, slug, templateDir, branchName } = inputData;
-     const targetPath = inputData
+     const targetPath = resolveTargetPath(inputData, runtimeContext);
      try {
        const copyFileTool = createTool({
          id: "copy-file",
@@ -3576,7 +3967,7 @@ var intelligentMergeStep = createStep({
        const agentBuilder = new AgentBuilder({
          projectPath: targetPath,
          mode: "template",
-         model:
+         model: resolveModel2(runtimeContext),
          instructions: `
  You are an expert at integrating Mastra template components into existing projects.

@@ -3798,7 +4189,7 @@ var validationAndFixStep = createStep({
    execute: async ({ inputData, runtimeContext }) => {
      console.log("Validation and fix step starting...");
      const { commitSha, slug, orderedUnits, templateDir, copiedFiles, conflictsResolved, maxIterations = 5 } = inputData;
-     const targetPath = inputData
+     const targetPath = resolveTargetPath(inputData, runtimeContext);
      const hasChanges = copiedFiles.length > 0 || conflictsResolved && conflictsResolved.length > 0;
      if (!hasChanges) {
        console.log("\u23ED\uFE0F Skipping validation - no files copied or conflicts resolved");
@@ -3927,7 +4318,7 @@ INTEGRATED UNITS:
  ${JSON.stringify(orderedUnits, null, 2)}

  Be thorough and methodical. Always use listDirectory to verify actual file existence before fixing imports.`,
-       model:
+       model: resolveModel2(runtimeContext),
        tools: {
          validateCode: allTools.validateCode,
          readFile: allTools.readFile,
@@ -4217,6 +4608,1389 @@ var determineConflictStrategy = (_unit, _targetFile) => {
  var shouldAbortWorkflow = (stepResult) => {
    return stepResult?.success === false || stepResult?.error;
  };
+ var TaskSchema = z.array(
+   z.object({
+     id: z.string().describe("Unique task ID using kebab-case"),
+     content: z.string().describe("Specific, actionable task description"),
+     status: z.enum(["pending", "in_progress", "completed", "blocked"]).default("pending"),
+     priority: z.enum(["high", "medium", "low"]).describe("Task priority"),
+     dependencies: z.array(z.string()).optional().describe("IDs of tasks this depends on"),
+     notes: z.string().describe("Detailed implementation notes and specifics")
+   })
+ );
+ var QuestionSchema = z.array(
+   z.object({
+     id: z.string().describe("Unique question ID"),
+     question: z.string().describe("Clear, specific question for the user"),
+     type: z.enum(["choice", "text", "boolean"]).describe("Type of answer expected"),
+     options: z.array(z.string()).optional().describe("Options for choice questions"),
+     context: z.string().optional().describe("Additional context or explanation")
+   })
+ );
+ var PlanningIterationResultSchema = z.object({
+   success: z.boolean(),
+   tasks: TaskSchema,
+   questions: QuestionSchema,
+   reasoning: z.string(),
+   planComplete: z.boolean(),
+   message: z.string(),
+   error: z.string().optional(),
+   allPreviousQuestions: z.array(z.any()).optional(),
+   allPreviousAnswers: z.record(z.string()).optional()
+ });
+
+ // src/workflows/task-planning/prompts.ts
+ var taskPlanningPrompts = {
+   planningAgent: {
+     instructions: (context) => `You are a Mastra workflow planning expert. Your task is to create a detailed, executable task plan.
+
+ PLANNING RESPONSIBILITIES:
+ 1. **Analyze Requirements**: Review the user's description and requirements thoroughly
+ 2. **Identify Decision Points**: Find any choices that require user input (email providers, databases, APIs, etc.)
+ 3. **Create Specific Tasks**: Generate concrete, actionable tasks with clear implementation notes
+ 4. **Ask Clarifying Questions**: If any decisions are unclear, formulate specific questions for the user
+   - do not ask about package managers
+   - Assume the user is going to use zod for validation
+   - You do not need to ask questions if you have none
+   - NEVER ask questions that have already been answered before
+ 5. **Incorporate Feedback**: Use any previous answers or feedback to refine the plan
+
+ ${context.storedQAPairs.length > 0 ? `PREVIOUS QUESTION-ANSWER PAIRS (${context.storedQAPairs.length} total):
+ ${context.storedQAPairs.map(
+   (pair, index) => `${index + 1}. Q: ${pair.question.question}
+    A: ${pair.answer || "NOT ANSWERED YET"}
+    Type: ${pair.question.type}
+    Asked: ${pair.askedAt}
+    ${pair.answer ? `Answered: ${pair.answeredAt}` : ""}`
+ ).join("\n\n")}
+
+ IMPORTANT: DO NOT ASK ANY QUESTIONS THAT HAVE ALREADY BEEN ASKED!` : ""}
+
+ Based on the context and any user answers, create or refine the task plan.`,
+     refinementPrompt: (context) => `Refine the existing task plan based on all user answers collected so far.
+
+ ANSWERED QUESTIONS AND RESPONSES:
+ ${context.storedQAPairs.filter((pair) => pair.answer).map(
+   (pair, index) => `${index + 1}. Q: ${pair.question.question}
+    A: ${pair.answer}
+    Context: ${pair.question.context || "None"}`
+ ).join("\n\n")}
+
+ REQUIREMENTS:
+ - Action: ${context.action}
+ - Workflow Name: ${context.workflowName || "To be determined"}
+ - Description: ${context.description || "Not specified"}
+ - Requirements: ${context.requirements || "Not specified"}
+
+ PROJECT CONTEXT:
+ - Discovered Workflows: ${JSON.stringify(context.discoveredWorkflows, null, 2)}
+ - Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
+ - Research: ${JSON.stringify(context.research, null, 2)}
+
+ ${context.hasTaskFeedback ? `
+ USER FEEDBACK ON PREVIOUS TASK LIST:
+ ${context.userAnswers?.taskFeedback}
+
+ PLEASE INCORPORATE THIS FEEDBACK INTO THE REFINED TASK LIST.` : ""}
+
+ Refine the task list and determine if any additional questions are needed.`,
+     initialPrompt: (context) => `Create an initial task plan for ${context.action}ing a Mastra workflow.
+
+ REQUIREMENTS:
+ - Action: ${context.action}
+ - Workflow Name: ${context.workflowName || "To be determined"}
+ - Description: ${context.description || "Not specified"}
+ - Requirements: ${context.requirements || "Not specified"}
+
+ PROJECT CONTEXT:
+ - Discovered Workflows: ${JSON.stringify(context.discoveredWorkflows, null, 2)}
+ - Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
+ - Research: ${JSON.stringify(context.research, null, 2)}
+
+ Create specific tasks and identify any questions that need user clarification.`
+   },
+   taskApproval: {
+     message: (questionsCount) => `Please answer ${questionsCount} question(s) to finalize the workflow plan:`,
+     approvalMessage: (tasksCount) => `Please review and approve the ${tasksCount} task(s) for execution:`
+   }
+ };
+ var WorkflowBuilderInputSchema = z.object({
+   workflowName: z.string().optional().describe("Name of the workflow to create or edit"),
+   action: z.enum(["create", "edit"]).describe("Action to perform: create new or edit existing workflow"),
+   description: z.string().optional().describe("Description of what the workflow should do"),
+   requirements: z.string().optional().describe("Detailed requirements for the workflow"),
+   projectPath: z.string().optional().describe("Path to the Mastra project (defaults to current directory)")
+ });
+ var DiscoveredWorkflowSchema = z.object({
+   name: z.string(),
+   file: z.string(),
+   description: z.string().optional(),
+   inputSchema: z.any().optional(),
+   outputSchema: z.any().optional(),
+   steps: z.array(z.string()).optional()
+ });
+ var WorkflowDiscoveryResultSchema = z.object({
+   success: z.boolean(),
+   workflows: z.array(DiscoveredWorkflowSchema),
+   mastraIndexExists: z.boolean(),
+   message: z.string(),
+   error: z.string().optional()
+ });
+ var ProjectDiscoveryResultSchema = z.object({
+   success: z.boolean(),
+   structure: z.object({
+     hasWorkflowsDir: z.boolean(),
+     hasAgentsDir: z.boolean(),
+     hasToolsDir: z.boolean(),
+     hasMastraIndex: z.boolean(),
+     existingWorkflows: z.array(z.string()),
+     existingAgents: z.array(z.string()),
+     existingTools: z.array(z.string())
+   }),
+   dependencies: z.record(z.string()),
+   message: z.string(),
+   error: z.string().optional()
+ });
+ var WorkflowResearchResultSchema = z.object({
+   success: z.boolean(),
+   documentation: z.object({
+     workflowPatterns: z.array(z.string()),
+     stepExamples: z.array(z.string()),
+     bestPractices: z.array(z.string())
+   }),
+   webResources: z.array(
+     z.object({
+       title: z.string(),
+       url: z.string(),
+       snippet: z.string(),
+       relevance: z.number()
+     })
+   ),
+   message: z.string(),
+   error: z.string().optional()
+ });
+ var TaskManagementResultSchema = z.object({
+   success: z.boolean(),
+   tasks: TaskSchema,
+   message: z.string(),
+   error: z.string().optional()
+ });
+ var TaskExecutionInputSchema = z.object({
+   action: z.enum(["create", "edit"]),
+   workflowName: z.string().optional(),
+   description: z.string().optional(),
+   requirements: z.string().optional(),
+   tasks: TaskSchema,
+   discoveredWorkflows: z.array(z.any()),
+   projectStructure: z.any(),
+   research: z.any(),
+   projectPath: z.string().optional()
+ });
+ var TaskExecutionSuspendSchema = z.object({
+   questions: QuestionSchema,
+   currentProgress: z.string(),
+   completedTasks: z.array(z.string()),
+   message: z.string()
+ });
+ var TaskExecutionResumeSchema = z.object({
+   answers: z.array(
+     z.object({
+       questionId: z.string(),
+       answer: z.string()
+     })
+   )
+ });
+ var TaskExecutionResultSchema = z.object({
+   success: z.boolean(),
+   filesModified: z.array(z.string()),
+   validationResults: z.object({
+     passed: z.boolean(),
+     errors: z.array(z.string()),
+     warnings: z.array(z.string())
+   }),
+   completedTasks: z.array(z.string()),
+   message: z.string(),
+   error: z.string().optional()
+ });
+ z.object({
+   questions: QuestionSchema
+ });
+ z.object({
+   answers: z.record(z.string()),
+   hasAnswers: z.boolean()
+ });
+ var WorkflowBuilderResultSchema = z.object({
+   success: z.boolean(),
+   action: z.enum(["create", "edit"]),
+   workflowName: z.string().optional(),
+   workflowFile: z.string().optional(),
+   discovery: WorkflowDiscoveryResultSchema.optional(),
+   projectStructure: ProjectDiscoveryResultSchema.optional(),
+   research: WorkflowResearchResultSchema.optional(),
+   planning: PlanningIterationResultSchema.optional(),
+   taskManagement: TaskManagementResultSchema.optional(),
+   execution: TaskExecutionResultSchema.optional(),
+   needsUserInput: z.boolean().optional(),
+   questions: QuestionSchema.optional(),
+   message: z.string(),
+   nextSteps: z.array(z.string()).optional(),
+   error: z.string().optional()
+ });
+ var TaskExecutionIterationInputSchema = (taskLength) => z.object({
+   status: z.enum(["in_progress", "completed", "needs_clarification"]).describe('Status - only use "completed" when ALL remaining tasks are finished'),
+   progress: z.string().describe("Current progress description"),
+   completedTasks: z.array(z.string()).describe("List of ALL completed task IDs (including previously completed ones)"),
+   totalTasksRequired: z.number().describe(`Total number of tasks that must be completed (should be ${taskLength})`),
+   tasksRemaining: z.array(z.string()).describe("List of task IDs that still need to be completed"),
+   filesModified: z.array(z.string()).describe("List of files that were created or modified - use these exact paths for validateCode tool"),
+   questions: QuestionSchema.optional().describe("Questions for user if clarification is needed"),
+   message: z.string().describe("Summary of work completed or current status"),
+   error: z.string().optional().describe("Any errors encountered")
+ });
+
+ // src/workflows/task-planning/schema.ts
+ var PlanningIterationInputSchema = z.object({
+   action: z.enum(["create", "edit"]),
+   workflowName: z.string().optional(),
+   description: z.string().optional(),
+   requirements: z.string().optional(),
+   discoveredWorkflows: z.array(DiscoveredWorkflowSchema),
+   projectStructure: ProjectDiscoveryResultSchema,
+   research: WorkflowResearchResultSchema,
+   userAnswers: z.record(z.string()).optional()
+ });
+ var PlanningIterationSuspendSchema = z.object({
+   questions: QuestionSchema,
+   message: z.string(),
+   currentPlan: z.object({
+     tasks: TaskSchema,
+     reasoning: z.string()
+   })
+ });
+ var PlanningIterationResumeSchema = z.object({
+   answers: z.record(z.string())
+ });
+ var PlanningAgentOutputSchema = z.object({
+   tasks: TaskSchema,
+   questions: QuestionSchema.optional(),
+   reasoning: z.string().describe("Explanation of the plan and any questions"),
+   planComplete: z.boolean().describe("Whether the plan is ready for execution (no more questions)")
+ });
+ var TaskApprovalOutputSchema = z.object({
+   approved: z.boolean(),
+   tasks: TaskSchema,
+   message: z.string(),
+   userFeedback: z.string().optional()
+ });
+ var TaskApprovalSuspendSchema = z.object({
+   taskList: TaskSchema,
+   summary: z.string(),
+   message: z.string()
+ });
+ var TaskApprovalResumeSchema = z.object({
+   approved: z.boolean(),
+   modifications: z.string().optional()
+ });
+
+ // src/workflows/task-planning/task-planning.ts
+ var planningIterationStep = createStep$1({
+   id: "planning-iteration",
+   description: "Create or refine task plan with user input",
+   inputSchema: PlanningIterationInputSchema,
+   outputSchema: PlanningIterationResultSchema,
+   suspendSchema: PlanningIterationSuspendSchema,
+   resumeSchema: PlanningIterationResumeSchema,
+   execute: async ({ inputData, resumeData, suspend, runtimeContext }) => {
+     const {
+       action,
+       workflowName,
+       description,
+       requirements,
+       discoveredWorkflows,
+       projectStructure,
+       research,
+       userAnswers
+     } = inputData;
+     console.log("Starting planning iteration...");
+     const qaKey = "workflow-builder-qa";
+     let storedQAPairs = runtimeContext.get(qaKey) || [];
+     const newAnswers = { ...userAnswers || {}, ...resumeData?.answers || {} };
+     console.log("before", storedQAPairs);
+     console.log("newAnswers", newAnswers);
+     if (Object.keys(newAnswers).length > 0) {
+       storedQAPairs = storedQAPairs.map((pair) => {
+         if (newAnswers[pair.question.id]) {
+           return {
+             ...pair,
+             answer: newAnswers[pair.question.id] || null,
+             answeredAt: (/* @__PURE__ */ new Date()).toISOString()
+           };
+         }
+         return pair;
+       });
+       runtimeContext.set(qaKey, storedQAPairs);
+     }
+     console.log("after", storedQAPairs);
+     console.log(
+       `Current Q&A state: ${storedQAPairs.length} question-answer pairs, ${storedQAPairs.filter((p) => p.answer).length} answered`
+     );
+     try {
+       const planningAgent = new Agent$1({
+         model: resolveModel(runtimeContext),
+         instructions: taskPlanningPrompts.planningAgent.instructions({
+           storedQAPairs
+         }),
+         name: "Workflow Planning Agent"
+         // tools: filteredMcpTools,
+       });
+       const hasTaskFeedback = Boolean(userAnswers && userAnswers.taskFeedback);
+       const planningPrompt = storedQAPairs.some((pair) => pair.answer) ? taskPlanningPrompts.planningAgent.refinementPrompt({
+         action,
+         workflowName,
+         description,
+         requirements,
+         discoveredWorkflows,
+         projectStructure,
+         research,
+         storedQAPairs,
+         hasTaskFeedback,
+         userAnswers
+       }) : taskPlanningPrompts.planningAgent.initialPrompt({
+         action,
+         workflowName,
+         description,
+         requirements,
+         discoveredWorkflows,
+         projectStructure,
+         research
+       });
+       const result = await planningAgent.generateVNext(planningPrompt, {
+         output: PlanningAgentOutputSchema
+         // maxSteps: 15,
+       });
+       const planResult = await result.object;
+       if (!planResult) {
+         return {
+           tasks: [],
+           success: false,
+           questions: [],
+           reasoning: "Planning agent failed to generate a valid response",
+           planComplete: false,
+           message: "Planning failed"
+         };
+       }
+       if (planResult.questions && planResult.questions.length > 0 && !planResult.planComplete) {
+         console.log(`Planning needs user clarification: ${planResult.questions.length} questions`);
+         console.log(planResult.questions);
+         const newQAPairs = planResult.questions.map((question) => ({
+           question,
+           answer: null,
+           askedAt: (/* @__PURE__ */ new Date()).toISOString(),
+           answeredAt: null
+         }));
+         storedQAPairs = [...storedQAPairs, ...newQAPairs];
+         runtimeContext.set(qaKey, storedQAPairs);
+         console.log(
+           `Updated Q&A state: ${storedQAPairs.length} total question-answer pairs, ${storedQAPairs.filter((p) => p.answer).length} answered`
+         );
+         return suspend({
+           questions: planResult.questions,
+           message: taskPlanningPrompts.taskApproval.message(planResult.questions.length),
+           currentPlan: {
+             tasks: planResult.tasks,
+             reasoning: planResult.reasoning
+           }
+         });
+       }
+       console.log(`Planning complete with ${planResult.tasks.length} tasks`);
+       runtimeContext.set(qaKey, storedQAPairs);
+       console.log(
+         `Final Q&A state: ${storedQAPairs.length} total question-answer pairs, ${storedQAPairs.filter((p) => p.answer).length} answered`
+       );
+       return {
+         tasks: planResult.tasks,
+         success: true,
+         questions: [],
+         reasoning: planResult.reasoning,
+         planComplete: true,
+         message: `Successfully created ${planResult.tasks.length} tasks`,
+         allPreviousQuestions: storedQAPairs.map((pair) => pair.question),
+         allPreviousAnswers: Object.fromEntries(
+           storedQAPairs.filter((pair) => pair.answer).map((pair) => [pair.question.id, pair.answer])
+         )
+       };
+     } catch (error) {
+       console.error("Planning iteration failed:", error);
+       return {
+         tasks: [],
+         success: false,
+         questions: [],
+         reasoning: `Planning failed: ${error instanceof Error ? error.message : String(error)}`,
+         planComplete: false,
+         message: "Planning iteration failed",
+         error: error instanceof Error ? error.message : String(error),
+         allPreviousQuestions: storedQAPairs.map((pair) => pair.question),
+         allPreviousAnswers: Object.fromEntries(
+           storedQAPairs.filter((pair) => pair.answer).map((pair) => [pair.question.id, pair.answer])
+         )
+       };
+     }
+   }
+ });
+ var taskApprovalStep = createStep$1({
+   id: "task-approval",
+   description: "Get user approval for the final task list",
+   inputSchema: PlanningIterationResultSchema,
+   outputSchema: TaskApprovalOutputSchema,
+   suspendSchema: TaskApprovalSuspendSchema,
+   resumeSchema: TaskApprovalResumeSchema,
+   execute: async ({ inputData, resumeData, suspend }) => {
+     const { tasks } = inputData;
+     if (!resumeData?.approved && resumeData?.approved !== false) {
+       console.log(`Requesting user approval for ${tasks.length} tasks`);
+       const summary = `Task List for Approval:
+
+ ${tasks.length} tasks planned:
+ ${tasks.map((task, i) => `${i + 1}. [${task.priority.toUpperCase()}] ${task.content}${task.dependencies?.length ? ` (depends on: ${task.dependencies.join(", ")})` : ""}
+    Notes: ${task.notes || "None"}`).join("\n")}`;
+       return suspend({
+         taskList: tasks,
+         summary,
+         message: taskPlanningPrompts.taskApproval.approvalMessage(tasks.length)
+       });
+     }
+     if (resumeData.approved) {
+       console.log("Task list approved by user");
+       return {
+         approved: true,
+         tasks,
+         message: "Task list approved, ready for execution"
+       };
+     } else {
+       console.log("Task list rejected by user");
+       return {
+         approved: false,
+         tasks,
+         message: "Task list rejected",
+         userFeedback: resumeData.modifications
+       };
+     }
+   }
+ });
+ var planningAndApprovalWorkflow = createWorkflow$1({
+   id: "planning-and-approval",
+   description: "Handle iterative planning with questions and task list approval",
+   inputSchema: PlanningIterationInputSchema,
+   outputSchema: TaskApprovalOutputSchema,
+   steps: [planningIterationStep, taskApprovalStep]
+ }).dountil(planningIterationStep, async ({ inputData }) => {
+   console.log(`Sub-workflow planning check: planComplete=${inputData.planComplete}`);
+   return inputData.planComplete === true;
+ }).map(async ({ inputData }) => {
+   return {
+     tasks: inputData.tasks || [],
+     success: inputData.success || false,
+     questions: inputData.questions || [],
+     reasoning: inputData.reasoning || "",
+     planComplete: inputData.planComplete || false,
+     message: inputData.message || ""
+   };
+ }).then(taskApprovalStep).commit();
+
+ // src/workflows/workflow-builder/prompts.ts
+ var workflowResearch = `
+ ## \u{1F50D} **COMPREHENSIVE MASTRA WORKFLOW RESEARCH SUMMARY**
+
+ Based on extensive research of Mastra documentation and examples, here's essential information for building effective Mastra workflows:
+
+ ### **\u{1F4CB} WORKFLOW FUNDAMENTALS**
+
+ **Core Components:**
+ - **\`createWorkflow()\`**: Main factory function that creates workflow instances
+ - **\`createStep()\`**: Creates individual workflow steps with typed inputs/outputs
+ - **\`.commit()\`**: Finalizes workflow definition (REQUIRED to make workflows executable)
+ - **Zod schemas**: Used for strict input/output typing and validation
+
+ **Basic Structure:**
+ \`\`\`typescript
+ import { createWorkflow, createStep } from "@mastra/core/workflows";
+ import { z } from "zod";
+
+ const workflow = createWorkflow({
+   id: "unique-workflow-id", // Required: kebab-case recommended
|
|
5121
|
+
description: "What this workflow does", // Optional but recommended
|
|
5122
|
+
inputSchema: z.object({...}), // Required: Defines workflow inputs
|
|
5123
|
+
outputSchema: z.object({...}) // Required: Defines final outputs
|
|
5124
|
+
})
|
|
5125
|
+
.then(step1) // Chain steps sequentially
|
|
5126
|
+
.then(step2)
|
|
5127
|
+
.commit(); // CRITICAL: Makes workflow executable
|
|
5128
|
+
\`\`\`
|
|
5129
|
+
|
|
5130
|
+
### **\u{1F527} STEP CREATION PATTERNS**
|
|
5131
|
+
|
|
5132
|
+
**Standard Step Definition:**
|
|
5133
|
+
\`\`\`typescript
|
|
5134
|
+
const myStep = createStep({
|
|
5135
|
+
id: "step-id", // Required: unique identifier
|
|
5136
|
+
description: "Step description", // Recommended for clarity
|
|
5137
|
+
inputSchema: z.object({...}), // Required: input validation
|
|
5138
|
+
outputSchema: z.object({...}), // Required: output validation
|
|
5139
|
+
execute: async ({ inputData, mastra, getStepResult, getInitData }) => {
|
|
5140
|
+
// Step logic here
|
|
5141
|
+
return { /* matches outputSchema */ };
|
|
5142
|
+
}
|
|
5143
|
+
});
|
|
5144
|
+
\`\`\`
|
|
5145
|
+
|
|
5146
|
+
**Execute Function Parameters:**
|
|
5147
|
+
- \`inputData\`: Validated input matching inputSchema
|
|
5148
|
+
- \`mastra\`: Access to Mastra instance (agents, tools, other workflows)
|
|
5149
|
+
- \`getStepResult(stepInstance)\`: Get results from previous steps
|
|
5150
|
+
- \`getInitData()\`: Access original workflow input data
|
|
5151
|
+
- \`runtimeContext\`: Runtime dependency injection context
|
|
5152
|
+
- \`runCount\`: Number of times this step has run (useful for retries)
|
|
5153
|
+
|
|
5154
|
+
### **\u{1F504} CONTROL FLOW METHODS**
|
|
5155
|
+
|
|
5156
|
+
**Sequential Execution:**
|
|
5157
|
+
- \`.then(step)\`: Execute steps one after another
|
|
5158
|
+
- Data flows automatically if schemas match
|
|
5159
|
+
|
|
5160
|
+
**Parallel Execution:**
|
|
5161
|
+
- \`.parallel([step1, step2])\`: Run steps simultaneously
|
|
5162
|
+
- All parallel steps complete before continuing
|
|
5163
|
+
|
|
5164
|
+
**Conditional Logic:**
|
|
5165
|
+
- \`.branch([[condition, step], [condition, step]])\`: Execute different steps based on conditions
|
|
5166
|
+
- Conditions evaluated sequentially, matching steps run in parallel
|
|
5167
|
+
|
|
5168
|
+
**Loops:**
|
|
5169
|
+
- \`.dountil(step, condition)\`: Repeat until condition becomes true
|
|
5170
|
+
- \`.dowhile(step, condition)\`: Repeat while condition is true
|
|
5171
|
+
- \`.foreach(step, {concurrency: N})\`: Execute step for each array item
|
|
5172
|
+
|
|
5173
|
+
**Data Transformation:**
|
|
5174
|
+
- \`.map(({ inputData, getStepResult, getInitData }) => transformedData)\`: Transform data between steps
|
|
5175
|
+
|
|
5176
|
+
### **\u23F8\uFE0F SUSPEND & RESUME CAPABILITIES**
|
|
5177
|
+
|
|
5178
|
+
**For Human-in-the-Loop Workflows:**
|
|
5179
|
+
\`\`\`typescript
|
|
5180
|
+
const userInputStep = createStep({
|
|
5181
|
+
id: "user-input",
|
|
5182
|
+
suspendSchema: z.object({}), // Schema for suspension payload
|
|
5183
|
+
resumeSchema: z.object({ // Schema for resume data
|
|
5184
|
+
userResponse: z.string()
|
|
5185
|
+
}),
|
|
5186
|
+
execute: async ({ resumeData, suspend }) => {
|
|
5187
|
+
if (!resumeData?.userResponse) {
|
|
5188
|
+
await suspend({}); // Pause workflow
|
|
5189
|
+
return { response: "" };
|
|
5190
|
+
}
|
|
5191
|
+
return { response: resumeData.userResponse };
|
|
5192
|
+
}
|
|
5193
|
+
});
|
|
5194
|
+
\`\`\`
|
|
5195
|
+
|
|
5196
|
+
**Resume Workflow:**
|
|
5197
|
+
\`\`\`typescript
|
|
5198
|
+
const result = await run.start({ inputData: {...} });
|
|
5199
|
+
if (result.status === "suspended") {
|
|
5200
|
+
await run.resume({
|
|
5201
|
+
step: result.suspended[0], // Or specific step ID
|
|
5202
|
+
resumeData: { userResponse: "answer" }
|
|
5203
|
+
});
|
|
5204
|
+
}
|
|
5205
|
+
\`\`\`
|
|
5206
|
+
|
|
5207
|
+
### **\u{1F6E0}\uFE0F INTEGRATING AGENTS & TOOLS**
|
|
5208
|
+
|
|
5209
|
+
**Using Agents in Steps:**
|
|
5210
|
+
\`\`\`typescript
|
|
5211
|
+
// Method 1: Agent as step
|
|
5212
|
+
const agentStep = createStep(myAgent);
|
|
5213
|
+
|
|
5214
|
+
// Method 2: Call agent in execute function
|
|
5215
|
+
const step = createStep({
|
|
5216
|
+
execute: async ({ inputData }) => {
|
|
5217
|
+
const result = await myAgent.generate(prompt);
|
|
5218
|
+
return { output: result.text };
|
|
5219
|
+
}
|
|
5220
|
+
});
|
|
5221
|
+
\`\`\`
|
|
5222
|
+
|
|
5223
|
+
**Using Tools in Steps:**
|
|
5224
|
+
\`\`\`typescript
|
|
5225
|
+
// Method 1: Tool as step
|
|
5226
|
+
const toolStep = createStep(myTool);
|
|
5227
|
+
|
|
5228
|
+
// Method 2: Call tool in execute function
|
|
5229
|
+
const step = createStep({
|
|
5230
|
+
execute: async ({ inputData, runtimeContext }) => {
|
|
5231
|
+
const result = await myTool.execute({
|
|
5232
|
+
context: inputData,
|
|
5233
|
+
runtimeContext
|
|
5234
|
+
});
|
|
5235
|
+
return result;
|
|
5236
|
+
}
|
|
5237
|
+
});
|
|
5238
|
+
\`\`\`
|
|
5239
|
+
|
|
5240
|
+
### **\u{1F5C2}\uFE0F PROJECT ORGANIZATION PATTERNS**
|
|
5241
|
+
|
|
5242
|
+
**MANDATORY Workflow Organization:**
|
|
5243
|
+
Each workflow MUST be organized in its own dedicated folder with separated concerns:
|
|
5244
|
+
|
|
5245
|
+
\`\`\`
|
|
5246
|
+
src/mastra/workflows/
|
|
5247
|
+
\u251C\u2500\u2500 my-workflow-name/ # Kebab-case folder name
|
|
5248
|
+
\u2502 \u251C\u2500\u2500 types.ts # All Zod schemas and TypeScript types
|
|
5249
|
+
\u2502 \u251C\u2500\u2500 steps.ts # All individual step definitions
|
|
5250
|
+
\u2502 \u251C\u2500\u2500 workflow.ts # Main workflow composition and export
|
|
5251
|
+
\u2502 \u2514\u2500\u2500 utils.ts # Helper functions (if needed)
|
|
5252
|
+
\u251C\u2500\u2500 another-workflow/
|
|
5253
|
+
\u2502 \u251C\u2500\u2500 types.ts
|
|
5254
|
+
\u2502 \u251C\u2500\u2500 steps.ts
|
|
5255
|
+
\u2502 \u251C\u2500\u2500 workflow.ts
|
|
5256
|
+
\u2502 \u2514\u2500\u2500 utils.ts
|
|
5257
|
+
\u2514\u2500\u2500 index.ts # Export all workflows
|
|
5258
|
+
\`\`\`
|
|
5259
|
+
|
|
5260
|
+
**CRITICAL File Organization Rules:**
|
|
5261
|
+
- **ALWAYS create a dedicated folder** for each workflow
|
|
5262
|
+
- **Folder names MUST be kebab-case** version of workflow name
|
|
5263
|
+
- **types.ts**: Define all input/output schemas, validation types, and interfaces
|
|
5264
|
+
- **steps.ts**: Create all individual step definitions using createStep()
|
|
5265
|
+
- **workflow.ts**: Compose steps into workflow using createWorkflow() and export the final workflow
|
|
5266
|
+
- **utils.ts**: Any helper functions, constants, or utilities (create only if needed)
|
|
5267
|
+
- **NEVER put everything in one file** - always separate concerns properly
|
|
5268
|
+
|
|
5269
|
+
**Workflow Registration:**
|
|
5270
|
+
\`\`\`typescript
|
|
5271
|
+
// src/mastra/index.ts
|
|
5272
|
+
export const mastra = new Mastra({
|
|
5273
|
+
workflows: {
|
|
5274
|
+
sendEmailWorkflow, // Use camelCase for keys
|
|
5275
|
+
dataProcessingWorkflow
|
|
5276
|
+
},
|
|
5277
|
+
storage: new LibSQLStore({ url: 'file:./mastra.db' }), // Required for suspend/resume
|
|
5278
|
+
});
|
|
5279
|
+
\`\`\`
|
|
5280
|
+
|
|
5281
|
+
### **\u{1F4E6} ESSENTIAL DEPENDENCIES**
|
|
5282
|
+
|
|
5283
|
+
**Required Packages:**
|
|
5284
|
+
\`\`\`json
|
|
5285
|
+
{
|
|
5286
|
+
"dependencies": {
|
|
5287
|
+
"@mastra/core": "latest",
|
|
5288
|
+
"zod": "^3.25.67"
|
|
5289
|
+
}
|
|
5290
|
+
}
|
|
5291
|
+
\`\`\`
|
|
5292
|
+
|
|
5293
|
+
**Additional Packages (as needed):**
|
|
5294
|
+
- \`@mastra/libsql\`: For workflow state persistence
|
|
5295
|
+
- \`@ai-sdk/openai\`: For AI model integration
|
|
5296
|
+
- \`ai\`: For AI SDK functionality
|
|
5297
|
+
|
|
5298
|
+
### **\u2705 WORKFLOW BEST PRACTICES**
|
|
5299
|
+
|
|
5300
|
+
**Schema Design:**
|
|
5301
|
+
- Use descriptive property names in schemas
|
|
5302
|
+
- Make schemas as specific as possible (avoid \`z.any()\`)
|
|
5303
|
+
- Include validation for required business logic
|
|
5304
|
+
|
|
5305
|
+
**Error Handling:**
|
|
5306
|
+
- Use \`try/catch\` blocks in step execute functions
|
|
5307
|
+
- Return meaningful error messages
|
|
5308
|
+
- Consider using \`bail()\` for early successful exits
|
|
5309
|
+
|
|
5310
|
+
**Step Organization:**
|
|
5311
|
+
- Keep steps focused on single responsibilities
|
|
5312
|
+
- Use descriptive step IDs (kebab-case recommended)
|
|
5313
|
+
- Create reusable steps for common operations
|
|
5314
|
+
|
|
5315
|
+
**Data Flow:**
|
|
5316
|
+
- Use \`.map()\` when schemas don't align between steps
|
|
5317
|
+
- Access previous step results with \`getStepResult(stepInstance)\`
|
|
5318
|
+
- Use \`getInitData()\` to access original workflow input
|
|
5319
|
+
|
|
5320
|
+
### **\u{1F680} EXECUTION PATTERNS**
|
|
5321
|
+
|
|
5322
|
+
**Running Workflows:**
|
|
5323
|
+
\`\`\`typescript
|
|
5324
|
+
// Create and start run
|
|
5325
|
+
const run = await workflow.createRunAsync();
|
|
5326
|
+
const result = await run.start({ inputData: {...} });
|
|
5327
|
+
|
|
5328
|
+
// Stream execution for real-time monitoring
|
|
5329
|
+
const stream = await run.streamVNext({ inputData: {...} });
|
|
5330
|
+
for await (const chunk of stream) {
|
|
5331
|
+
console.log(chunk);
|
|
5332
|
+
}
|
|
5333
|
+
|
|
5334
|
+
// Watch for events
|
|
5335
|
+
run.watch((event) => console.log(event));
|
|
5336
|
+
\`\`\`
|
|
5337
|
+
|
|
5338
|
+
**Workflow Status Types:**
|
|
5339
|
+
- \`"success"\`: Completed successfully
|
|
5340
|
+
- \`"suspended"\`: Paused awaiting input
|
|
5341
|
+
- \`"failed"\`: Encountered error
|
|
5342
|
+
|
|
5343
|
+
### **\u{1F517} ADVANCED FEATURES**
|
|
5344
|
+
|
|
5345
|
+
**Nested Workflows:**
|
|
5346
|
+
- Use workflows as steps: \`.then(otherWorkflow)\`
|
|
5347
|
+
- Enable complex workflow composition
|
|
5348
|
+
|
|
5349
|
+
**Runtime Context:**
|
|
5350
|
+
- Pass shared data across all steps
|
|
5351
|
+
- Enable dependency injection patterns
|
|
5352
|
+
|
|
5353
|
+
**Streaming & Events:**
|
|
5354
|
+
- Real-time workflow monitoring
|
|
5355
|
+
- Integration with external event systems
|
|
5356
|
+
|
|
5357
|
+
**Cloning:**
|
|
5358
|
+
- \`cloneWorkflow(original, {id: "new-id"})\`: Reuse workflow structure
|
|
5359
|
+
- \`cloneStep(original, {id: "new-id"})\`: Reuse step logic
|
|
5360
|
+
|
|
5361
|
+
This comprehensive research provides the foundation for creating robust, maintainable Mastra workflows with proper typing, error handling, and architectural patterns.
|
|
5362
|
+
`;
|
|
5363
|
+
var workflowBuilderPrompts = {
|
|
5364
|
+
researchAgent: {
|
|
5365
|
+
instructions: `You are a Mastra workflow research expert. Your task is to gather relevant information about creating Mastra workflows.
|
|
5366
|
+
|
|
5367
|
+
RESEARCH OBJECTIVES:
|
|
5368
|
+
1. **Core Concepts**: Understand how Mastra workflows work
|
|
5369
|
+
2. **Best Practices**: Learn workflow patterns and conventions
|
|
5370
|
+
3. **Code Examples**: Find relevant implementation examples
|
|
5371
|
+
4. **Technical Details**: Understand schemas, steps, and configuration
|
|
5372
|
+
|
|
5373
|
+
Use the available documentation and examples tools to gather comprehensive information about Mastra workflows.`,
|
|
5374
|
+
prompt: (context) => `Research everything about Mastra workflows to help create or edit them effectively.
|
|
5375
|
+
|
|
5376
|
+
PROJECT CONTEXT:
|
|
5377
|
+
- Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
|
|
5378
|
+
- Dependencies: ${JSON.stringify(context.dependencies, null, 2)}
|
|
5379
|
+
- Has Workflows Directory: ${context.hasWorkflowsDir}
|
|
5380
|
+
|
|
5381
|
+
Focus on:
|
|
5382
|
+
1. How to create workflows using createWorkflow()
|
|
5383
|
+
2. How to create and chain workflow steps
|
|
5384
|
+
3. Best practices for workflow organization
|
|
5385
|
+
4. Common workflow patterns and examples
|
|
5386
|
+
5. Schema definitions and types
|
|
5387
|
+
6. Error handling and debugging
|
|
5388
|
+
|
|
5389
|
+
Use the docs and examples tools to gather comprehensive information.`
|
|
5390
|
+
},
|
|
5391
|
+
executionAgent: {
|
|
5392
|
+
instructions: (context) => `You are executing a workflow ${context.action} task for: "${context.workflowName}"
|
|
5393
|
+
|
|
5394
|
+
CRITICAL WORKFLOW EXECUTION REQUIREMENTS:
|
|
5395
|
+
1. **EXPLORE PROJECT STRUCTURE FIRST**: Use listDirectory and readFile tools to understand the existing project layout, folder structure, and conventions before creating any files
|
|
5396
|
+
2. **FOLLOW PROJECT CONVENTIONS**: Look at existing workflows, agents, and file structures to understand where new files should be placed (typically src/mastra/workflows/, src/mastra/agents/, etc.)
|
|
5397
|
+
3. **USE PRE-LOADED TASK LIST**: Your task list has been pre-populated in the taskManager tool. Use taskManager with action 'list' to see all tasks, and action 'update' to mark progress
|
|
5398
|
+
4. **COMPLETE EVERY SINGLE TASK**: You MUST complete ALL ${context.tasksLength} tasks that are already in the taskManager. Do not stop until every task is marked as 'completed'
|
|
5399
|
+
5. **Follow Task Dependencies**: Execute tasks in the correct order, respecting dependencies
|
|
5400
|
+
6. **Request User Input When Needed**: If you encounter choices (like email providers, databases, etc.) that require user decision, return questions for clarification
|
|
5401
|
+
7. **STRICT WORKFLOW ORGANIZATION**: When creating or editing workflows, you MUST follow this exact structure
|
|
5402
|
+
|
|
5403
|
+
MANDATORY WORKFLOW FOLDER STRUCTURE:
|
|
5404
|
+
When ${context.action === "create" ? "creating a new workflow" : "editing a workflow"}, you MUST organize files as follows:
|
|
5405
|
+
|
|
5406
|
+
\u{1F4C1} src/mastra/workflows/${context.workflowName?.toLowerCase().replace(/[^a-z0-9]/g, "-") || "new-workflow"}/
|
|
5407
|
+
\u251C\u2500\u2500 \u{1F4C4} types.ts # All Zod schemas and TypeScript types
|
|
5408
|
+
\u251C\u2500\u2500 \u{1F4C4} steps.ts # All individual step definitions
|
|
5409
|
+
\u251C\u2500\u2500 \u{1F4C4} workflow.ts # Main workflow composition and export
|
|
5410
|
+
\u2514\u2500\u2500 \u{1F4C4} utils.ts # Helper functions (if needed)
|
|
5411
|
+
|
|
5412
|
+
CRITICAL FILE ORGANIZATION RULES:
|
|
5413
|
+
- **ALWAYS create a dedicated folder** for the workflow in src/mastra/workflows/
|
|
5414
|
+
- **Folder name MUST be kebab-case** version of workflow name
|
|
5415
|
+
- **types.ts**: Define all input/output schemas, validation types, and interfaces
|
|
5416
|
+
- **steps.ts**: Create all individual step definitions using createStep()
|
|
5417
|
+
- **workflow.ts**: Compose steps into workflow using createWorkflow() and export the final workflow
|
|
5418
|
+
- **utils.ts**: Any helper functions, constants, or utilities (create only if needed)
|
|
5419
|
+
- **NEVER put everything in one file** - always separate concerns properly
|
|
5420
|
+
|
|
5421
|
+
CRITICAL COMPLETION REQUIREMENTS:
|
|
5422
|
+
- ALWAYS explore the directory structure before creating files to understand where they should go
|
|
5423
|
+
- You MUST complete ALL ${context.tasksLength} tasks before returning status='completed'
|
|
5424
|
+
- Use taskManager tool with action 'list' to see your current task list and action 'update' to mark tasks as 'in_progress' or 'completed'
|
|
5425
|
+
- If you need to make any decisions during implementation (choosing providers, configurations, etc.), return questions for user clarification
|
|
5426
|
+
- DO NOT make assumptions about file locations - explore first!
|
|
5427
|
+
- You cannot finish until ALL tasks in the taskManager are marked as 'completed'
|
|
5428
|
+
|
|
5429
|
+
PROJECT CONTEXT:
|
|
5430
|
+
- Action: ${context.action}
|
|
5431
|
+
- Workflow Name: ${context.workflowName}
|
|
5432
|
+
- Project Path: ${context.currentProjectPath}
|
|
5433
|
+
- Discovered Workflows: ${JSON.stringify(context.discoveredWorkflows, null, 2)}
|
|
5434
|
+
- Project Structure: ${JSON.stringify(context.projectStructure, null, 2)}
|
|
5435
|
+
|
|
5436
|
+
AVAILABLE RESEARCH:
|
|
5437
|
+
${JSON.stringify(context.research, null, 2)}
|
|
5438
|
+
|
|
5439
|
+
PRE-LOADED TASK LIST (${context.tasksLength} tasks already in taskManager):
|
|
5440
|
+
${context.tasks.map((task) => `- ${task.id}: ${task.content} (Priority: ${task.priority})`).join("\n")}
|
|
5441
|
+
|
|
5442
|
+
${context.resumeData ? `USER PROVIDED ANSWERS: ${JSON.stringify(context.resumeData.answers, null, 2)}` : ""}
|
|
5443
|
+
|
|
5444
|
+
Start by exploring the project structure, then use 'taskManager' with action 'list' to see your pre-loaded tasks, and work through each task systematically.`,
|
|
5445
|
+
prompt: (context) => context.resumeData ? `Continue working on the task list. The user has provided answers to your questions: ${JSON.stringify(context.resumeData.answers, null, 2)}.
|
|
5446
|
+
|
|
5447
|
+
CRITICAL: You must complete ALL ${context.tasks.length} tasks that are pre-loaded in the taskManager. Use the taskManager tool with action 'list' to check your progress and continue with the next tasks. Do not stop until every single task is marked as 'completed'.` : `Begin executing the pre-loaded task list to ${context.action} the workflow "${context.workflowName}".
|
|
5448
|
+
|
|
5449
|
+
CRITICAL REQUIREMENTS:
|
|
5450
|
+
- Your ${context.tasks.length} tasks have been PRE-LOADED into the taskManager tool
|
|
5451
|
+
- Start by exploring the project directory structure using listDirectory and readFile tools to understand:
|
|
5452
|
+
- Where workflows are typically stored (look for src/mastra/workflows/ or similar)
|
|
5453
|
+
- What the existing file structure looks like
|
|
5454
|
+
- How other workflows are organized and named
|
|
5455
|
+
- Where agent files are stored if needed
|
|
5456
|
+
- Then use taskManager with action 'list' to see your pre-loaded tasks
|
|
5457
|
+
- Use taskManager with action 'update' to mark tasks as 'in_progress' or 'completed'
|
|
5458
|
+
|
|
5459
|
+
CRITICAL FILE ORGANIZATION RULES:
|
|
5460
|
+
- **ALWAYS create a dedicated folder** for the workflow in src/mastra/workflows/
|
|
5461
|
+
- **Folder name MUST be kebab-case** version of workflow name
|
|
5462
|
+
- **NEVER put everything in one file** - separate types, steps, and workflow composition
|
|
5463
|
+
- Follow the 4-file structure above for maximum maintainability and clarity
|
|
5464
|
+
|
|
5465
|
+
- DO NOT return status='completed' until ALL ${context.tasks.length} tasks are marked as 'completed' in the taskManager
|
|
5466
|
+
|
|
5467
|
+
PRE-LOADED TASKS (${context.tasks.length} total tasks in taskManager):
|
|
5468
|
+
${context.tasks.map((task, index) => `${index + 1}. [${task.id}] ${task.content}`).join("\n")}
|
|
5469
|
+
|
|
5470
|
+
Use taskManager with action 'list' to see the current status of all tasks. You must complete every single one before finishing.`,
|
|
5471
|
+
iterationPrompt: (context) => `Continue working on the remaining tasks. You have already completed these tasks: [${context.completedTasks.map((t) => t.id).join(", ")}]
|
|
5472
|
+
|
|
5473
|
+
REMAINING TASKS TO COMPLETE (${context.pendingTasks.length} tasks):
|
|
5474
|
+
${context.pendingTasks.map((task, index) => `${index + 1}. [${task.id}] ${task.content}`).join("\n")}
|
|
5475
|
+
|
|
5476
|
+
CRITICAL: You must complete ALL of these remaining ${context.pendingTasks.length} tasks. Use taskManager with action 'list' to check current status and action 'update' to mark tasks as completed.
|
|
5477
|
+
|
|
5478
|
+
${context.resumeData ? `USER PROVIDED ANSWERS: ${JSON.stringify(context.resumeData.answers, null, 2)}` : ""}`
|
|
5479
|
+
},
|
|
5480
|
+
validation: {
|
|
5481
|
+
instructions: `CRITICAL VALIDATION INSTRUCTIONS:
|
|
5482
|
+
- When using the validateCode tool, ALWAYS pass the specific files you created or modified using the 'files' parameter
|
|
5483
|
+
- The tool uses a hybrid validation approach: fast syntax checking \u2192 semantic type checking \u2192 ESLint
|
|
5484
|
+
- This is much faster than full project compilation and only shows errors from your specific files
|
|
5485
|
+
- Example: validateCode({ validationType: ['types', 'lint'], files: ['src/workflows/my-workflow.ts', 'src/agents/my-agent.ts'] })
|
|
5486
|
+
- ALWAYS validate after creating or modifying files to ensure they compile correctly`
|
|
5487
|
+
}
|
|
5488
|
+
};
|
|
5489
|
+
var restrictedTaskManager = createTool$1({
|
|
5490
|
+
id: "task-manager",
|
|
5491
|
+
description: "View and update your pre-loaded task list. You can only mark tasks as in_progress or completed, not create new tasks.",
|
|
5492
|
+
inputSchema: z.object({
|
|
5493
|
+
action: z.enum(["list", "update", "complete"]).describe("List tasks, update status, or mark complete - tasks are pre-loaded"),
|
|
5494
|
+
tasks: z.array(
|
|
5495
|
+
z.object({
|
|
5496
|
+
id: z.string().describe("Task ID - must match existing task"),
|
|
5497
|
+
content: z.string().optional().describe("Task content (read-only)"),
|
|
5498
|
+
status: z.enum(["pending", "in_progress", "completed", "blocked"]).describe("Task status"),
|
|
5499
|
+
priority: z.enum(["high", "medium", "low"]).optional().describe("Task priority (read-only)"),
|
|
5500
|
+
dependencies: z.array(z.string()).optional().describe("Task dependencies (read-only)"),
|
|
5501
|
+
notes: z.string().optional().describe("Additional notes or progress updates")
|
|
5502
|
+
})
|
|
5503
|
+
).optional().describe("Tasks to update (status and notes only)"),
|
|
5504
|
+
taskId: z.string().optional().describe("Specific task ID for single task operations")
|
|
5505
|
+
}),
|
|
5506
|
+
outputSchema: z.object({
|
|
5507
|
+
success: z.boolean(),
|
|
5508
|
+
tasks: z.array(
|
|
5509
|
+
z.object({
|
|
5510
|
+
id: z.string(),
|
|
5511
|
+
content: z.string(),
|
|
5512
|
+
status: z.string(),
|
|
5513
|
+
priority: z.string(),
|
|
5514
|
+
dependencies: z.array(z.string()).optional(),
|
|
5515
|
+
notes: z.string().optional(),
|
|
5516
|
+
createdAt: z.string(),
|
|
5517
|
+
updatedAt: z.string()
|
|
5518
|
+
})
|
|
5519
|
+
),
|
|
5520
|
+
message: z.string()
|
|
5521
|
+
}),
|
|
5522
|
+
execute: async ({ context }) => {
|
|
5523
|
+
const adaptedContext = {
|
|
5524
|
+
...context,
|
|
5525
|
+
action: context.action,
|
|
5526
|
+
tasks: context.tasks?.map((task) => ({
|
|
5527
|
+
...task,
|
|
5528
|
+
priority: task.priority || "medium"
|
|
5529
|
+
}))
|
|
5530
|
+
};
|
|
5531
|
+
return await AgentBuilderDefaults.manageTaskList(adaptedContext);
|
|
5532
|
+
}
|
|
5533
|
+
});
|
|
5534
|
+
|
|
5535
|
+
// src/workflows/workflow-builder/workflow-builder.ts
|
|
5536
|
+
var workflowDiscoveryStep = createStep({
|
|
5537
|
+
id: "workflow-discovery",
|
|
5538
|
+
description: "Discover existing workflows in the project",
|
|
5539
|
+
inputSchema: WorkflowBuilderInputSchema,
|
|
5540
|
+
outputSchema: WorkflowDiscoveryResultSchema,
|
|
5541
|
+
execute: async ({ inputData, runtimeContext: _runtimeContext }) => {
|
|
5542
|
+
console.log("Starting workflow discovery...");
|
|
5543
|
+
const { projectPath = process.cwd() } = inputData;
|
|
5544
|
+
try {
|
|
5545
|
+
const workflowsPath = join(projectPath, "src/mastra/workflows");
|
|
5546
|
+
if (!existsSync(workflowsPath)) {
|
|
5547
|
+
console.log("No workflows directory found");
|
|
5548
|
+
return {
|
|
5549
|
+
success: true,
|
|
5550
|
+
workflows: [],
|
|
5551
|
+
mastraIndexExists: existsSync(join(projectPath, "src/mastra/index.ts")),
|
|
5552
|
+
message: "No existing workflows found in the project"
|
|
5553
|
+
};
|
|
5554
|
+
}
|
|
5555
|
+
const workflowFiles = await readdir(workflowsPath);
|
|
5556
|
+
const workflows = [];
|
|
5557
|
+
for (const fileName of workflowFiles) {
|
|
5558
|
+
if (fileName.endsWith(".ts") && !fileName.endsWith(".test.ts")) {
|
|
5559
|
+
const filePath = join(workflowsPath, fileName);
|
|
5560
|
+
try {
|
|
5561
|
+
const content = await readFile(filePath, "utf-8");
|
|
5562
|
+
const nameMatch = content.match(/createWorkflow\s*\(\s*{\s*id:\s*['"]([^'"]+)['"]/);
|
|
5563
|
+
const descMatch = content.match(/description:\s*['"]([^'"]*)['"]/);
|
|
5564
|
+
if (nameMatch && nameMatch[1]) {
|
|
5565
|
+
workflows.push({
|
|
5566
|
+
name: nameMatch[1],
|
|
5567
|
+
file: filePath,
|
|
5568
|
+
description: descMatch?.[1] ?? "No description available"
|
|
5569
|
+
});
|
|
5570
|
+
}
|
|
5571
|
+
} catch (error) {
|
|
5572
|
+
console.warn(`Failed to read workflow file ${filePath}:`, error);
|
|
5573
|
+
}
|
|
5574
|
+
}
|
|
5575
|
+
}
|
|
5576
|
+
console.log(`Discovered ${workflows.length} existing workflows`);
|
|
5577
|
+
return {
|
|
5578
|
+
success: true,
|
|
5579
|
+
workflows,
|
|
5580
|
+
mastraIndexExists: existsSync(join(projectPath, "src/mastra/index.ts")),
|
|
5581
|
+
message: workflows.length > 0 ? `Found ${workflows.length} existing workflow(s): ${workflows.map((w) => w.name).join(", ")}` : "No existing workflows found in the project"
|
|
5582
|
+
};
|
|
5583
|
+
} catch (error) {
|
|
5584
|
+
console.error("Workflow discovery failed:", error);
|
|
5585
|
+
return {
|
|
5586
|
+
success: false,
|
|
5587
|
+
workflows: [],
|
|
5588
|
+
mastraIndexExists: false,
|
|
5589
|
+
message: `Workflow discovery failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
5590
|
+
error: error instanceof Error ? error.message : String(error)
|
|
5591
|
+
};
|
|
5592
|
+
}
|
|
5593
|
+
}
|
|
5594
|
+
});
|
|
5595
|
+
var projectDiscoveryStep = createStep({
|
|
5596
|
+
id: "project-discovery",
|
|
5597
|
+
description: "Analyze the project structure and setup",
|
|
5598
|
+
inputSchema: WorkflowDiscoveryResultSchema,
|
|
5599
|
+
outputSchema: ProjectDiscoveryResultSchema,
|
|
5600
|
+
execute: async ({ inputData: _inputData, runtimeContext: _runtimeContext }) => {
|
|
5601
|
+
console.log("Starting project discovery...");
|
|
5602
|
+
try {
|
|
5603
|
+
const projectPath = process.cwd();
|
|
5604
|
+
const projectStructure = {
|
|
5605
|
+
hasPackageJson: existsSync(join(projectPath, "package.json")),
|
|
5606
|
+
hasMastraConfig: existsSync(join(projectPath, "mastra.config.js")) || existsSync(join(projectPath, "mastra.config.ts")),
|
|
5607
|
+
hasSrcDirectory: existsSync(join(projectPath, "src")),
|
|
5608
|
+
hasMastraDirectory: existsSync(join(projectPath, "src/mastra")),
|
|
5609
|
+
hasWorkflowsDirectory: existsSync(join(projectPath, "src/mastra/workflows")),
|
|
5610
|
+
hasToolsDirectory: existsSync(join(projectPath, "src/mastra/tools")),
|
|
5611
|
+
hasAgentsDirectory: existsSync(join(projectPath, "src/mastra/agents"))
|
|
5612
|
+
};
|
|
5613
|
+
let packageInfo = null;
|
|
5614
|
+
if (projectStructure.hasPackageJson) {
|
|
5615
|
+
try {
|
|
5616
|
+
const packageContent = await readFile(join(projectPath, "package.json"), "utf-8");
|
|
5617
|
+
packageInfo = JSON.parse(packageContent);
|
|
5618
|
+
} catch (error) {
|
|
5619
|
+
console.warn("Failed to read package.json:", error);
|
|
5620
|
+
}
|
|
5621
|
+
}
|
|
5622
|
+
console.log("Project discovery completed");
|
|
5623
|
+
return {
|
|
5624
|
+
success: true,
|
|
5625
|
+
structure: {
|
|
5626
|
+
hasWorkflowsDir: projectStructure.hasWorkflowsDirectory,
|
|
5627
|
+
hasAgentsDir: projectStructure.hasAgentsDirectory,
|
|
5628
|
+
hasToolsDir: projectStructure.hasToolsDirectory,
|
|
5629
|
+
hasMastraIndex: existsSync(join(projectPath, "src/mastra/index.ts")),
|
|
5630
|
+
existingWorkflows: [],
|
|
5631
|
+
existingAgents: [],
|
|
5632
|
+
existingTools: []
|
|
5633
|
+
},
|
|
5634
|
+
dependencies: packageInfo?.dependencies || {},
|
|
5635
|
+
message: "Project discovery completed successfully"
|
|
5636
|
+
};
|
|
5637
|
+
} catch (error) {
|
|
5638
|
+
console.error("Project discovery failed:", error);
|
|
5639
|
+
return {
|
|
5640
|
+
success: false,
|
|
5641
|
+
structure: {
|
|
5642
|
+
hasWorkflowsDir: false,
|
|
5643
|
+
hasAgentsDir: false,
|
|
5644
|
+
hasToolsDir: false,
|
|
5645
|
+
hasMastraIndex: false,
|
|
5646
|
+
existingWorkflows: [],
|
|
5647
|
+
existingAgents: [],
|
|
5648
|
+
existingTools: []
|
|
5649
|
+
},
|
|
5650
|
+
dependencies: {},
|
|
5651
|
+
message: "Project discovery failed",
|
|
5652
|
+
error: error instanceof Error ? error.message : String(error)
|
|
5653
|
+
};
|
|
5654
|
+
}
|
|
5655
|
+
}
|
|
5656
|
+
});
|
|
5657
|
+
var workflowResearchStep = createStep({
|
|
5658
|
+
id: "workflow-research",
|
|
5659
|
+
description: "Research Mastra workflows and gather relevant documentation",
|
|
5660
|
+
inputSchema: ProjectDiscoveryResultSchema,
|
|
5661
|
+
outputSchema: WorkflowResearchResultSchema,
|
|
5662
|
+
execute: async ({ inputData, runtimeContext }) => {
|
|
5663
|
+
console.log("Starting workflow research...");
|
|
5664
|
+
try {
|
|
5665
|
+
const researchAgent = new Agent({
|
|
5666
|
+
model: resolveModel(runtimeContext),
|
|
5667
|
+
instructions: workflowBuilderPrompts.researchAgent.instructions,
|
|
5668
|
+
name: "Workflow Research Agent"
|
|
5669
|
+
// tools: filteredMcpTools,
|
|
5670
|
+
});
|
|
5671
|
+
const researchPrompt = workflowBuilderPrompts.researchAgent.prompt({
|
|
5672
|
+
projectStructure: inputData.structure,
|
|
5673
|
+
dependencies: inputData.dependencies,
|
|
5674
|
+
hasWorkflowsDir: inputData.structure.hasWorkflowsDir
|
|
5675
|
+
});
|
|
5676
|
+
const result = await researchAgent.generateVNext(researchPrompt, {
|
|
5677
|
+
output: WorkflowResearchResultSchema
|
|
5678
|
+
// stopWhen: stepCountIs(10),
|
|
5679
|
+
});
|
|
5680
|
+
const researchResult = await result.object;
|
|
5681
|
+
if (!researchResult) {
|
|
5682
|
+
return {
|
|
5683
|
+
success: false,
|
|
5684
|
+
documentation: {
|
|
5685
|
+
workflowPatterns: [],
|
|
5686
|
+
stepExamples: [],
|
|
5687
|
+
bestPractices: []
|
|
5688
|
+
},
|
|
5689
|
+
webResources: [],
|
|
5690
|
+
message: "Research agent failed to generate valid response",
|
|
5691
|
+
error: "Research agent failed to generate valid response"
|
|
5692
|
+
};
|
|
5693
|
+
}
|
|
5694
|
+
console.log("Research completed successfully");
|
|
5695
|
+
return {
|
|
5696
|
+
success: true,
|
|
5697
|
+
documentation: {
|
|
5698
|
+
workflowPatterns: researchResult.documentation.workflowPatterns,
|
|
5699
|
+
stepExamples: researchResult.documentation.stepExamples,
|
|
5700
|
+
bestPractices: researchResult.documentation.bestPractices
|
|
5701
|
+
},
|
|
5702
|
+
webResources: researchResult.webResources,
|
|
5703
|
+
message: "Research completed successfully"
|
|
5704
|
+
};
|
|
5705
|
+
} catch (error) {
|
|
5706
|
+
console.error("Workflow research failed:", error);
|
|
5707
|
+
return {
|
|
5708
|
+
success: false,
|
|
5709
|
+
documentation: {
|
|
5710
|
+
workflowPatterns: [],
|
|
5711
|
+
stepExamples: [],
|
|
5712
|
+
bestPractices: []
|
|
5713
|
+
},
|
|
5714
|
+
webResources: [],
|
|
5715
|
+
message: "Research failed",
|
|
5716
|
+
error: error instanceof Error ? error.message : String(error)
|
|
5717
|
+
};
|
|
5718
|
+
}
|
|
5719
|
+
}
|
|
5720
|
+
});
|
|
5721
|
+
var taskExecutionStep = createStep({
|
|
5722
|
+
id: "task-execution",
|
|
5723
|
+
description: "Execute the approved task list to create or edit the workflow",
|
|
5724
|
+
inputSchema: TaskExecutionInputSchema,
|
|
5725
|
+
outputSchema: TaskExecutionResultSchema,
|
|
5726
|
+
suspendSchema: TaskExecutionSuspendSchema,
|
|
5727
|
+
resumeSchema: TaskExecutionResumeSchema,
|
|
5728
|
+
execute: async ({ inputData, resumeData, suspend, runtimeContext }) => {
|
|
5729
|
+
const {
|
|
5730
|
+
action,
|
|
5731
|
+
workflowName,
|
|
5732
|
+
description: _description,
|
|
5733
|
+
requirements: _requirements,
|
|
5734
|
+
tasks,
|
|
5735
|
+
discoveredWorkflows,
|
|
5736
|
+
projectStructure,
|
|
5737
|
+
research,
|
|
5738
|
+
projectPath
|
|
5739
|
+
} = inputData;
|
|
5740
|
+
console.log(`Starting task execution for ${action}ing workflow: ${workflowName}`);
|
|
5741
|
+
console.log(`Executing ${tasks.length} tasks using AgentBuilder stream...`);
|
|
5742
|
+
try {
|
|
5743
|
+
const currentProjectPath = projectPath || process.cwd();
|
|
5744
|
+
console.log("Pre-populating taskManager with planned tasks...");
|
|
5745
|
+
const taskManagerContext = {
|
|
5746
|
+
action: "create",
|
|
5747
|
+
tasks: tasks.map((task) => ({
|
|
5748
|
+
id: task.id,
|
|
5749
|
+
content: task.content,
|
|
5750
|
+
status: "pending",
|
|
5751
|
+
priority: task.priority,
|
|
5752
|
+
dependencies: task.dependencies,
|
|
5753
|
+
notes: task.notes
|
|
5754
|
+
}))
|
|
5755
|
+
};
|
|
5756
|
+
const taskManagerResult = await AgentBuilderDefaults.manageTaskList(taskManagerContext);
|
|
5757
|
+
console.log(`Task manager initialized with ${taskManagerResult.tasks.length} tasks`);
|
|
5758
|
+
if (!taskManagerResult.success) {
|
|
5759
|
+
throw new Error(`Failed to initialize task manager: ${taskManagerResult.message}`);
|
|
5760
|
+
}
|
|
5761
|
+
const executionAgent = new AgentBuilder({
|
|
5762
|
+
projectPath: currentProjectPath,
|
|
5763
|
+
model: resolveModel(runtimeContext),
|
|
5764
|
+
tools: {
|
|
5765
|
+
"task-manager": restrictedTaskManager
|
|
5766
|
+
},
|
|
5767
|
+
instructions: `${workflowBuilderPrompts.executionAgent.instructions({
|
|
5768
|
+
action,
|
|
5769
|
+
workflowName,
|
|
5770
|
+
tasksLength: tasks.length,
|
|
5771
|
+
currentProjectPath,
|
|
5772
|
+
discoveredWorkflows,
|
|
5773
|
+
projectStructure,
|
|
5774
|
+
research,
|
|
5775
|
+
tasks,
|
|
5776
|
+
resumeData
|
|
5777
|
+
})}
|
|
5778
|
+
|
|
5779
|
+
${workflowBuilderPrompts.validation.instructions}`
|
|
5780
|
+
});
|
|
5781
|
+
const executionPrompt = workflowBuilderPrompts.executionAgent.prompt({
|
|
5782
|
+
action,
|
|
5783
|
+
workflowName,
|
|
5784
|
+
tasks,
|
|
5785
|
+
resumeData
|
|
5786
|
+
});
|
|
5787
|
+
const originalInstructions = await executionAgent.getInstructions({ runtimeContext });
|
|
5788
|
+
const additionalInstructions = executionAgent.instructions;
|
|
5789
|
+
let enhancedInstructions = originalInstructions;
|
|
5790
|
+
if (additionalInstructions) {
|
|
5791
|
+
enhancedInstructions = `${originalInstructions}
|
|
5792
|
+
|
|
5793
|
+
${additionalInstructions}`;
|
|
5794
|
+
}
|
|
5795
|
+
const enhancedOptions = {
|
|
5796
|
+
stopWhen: stepCountIs(100),
|
|
5797
|
+
temperature: 0.3,
|
|
5798
|
+
instructions: enhancedInstructions
|
|
5799
|
+
};
|
|
5800
|
+
let finalResult = null;
|
|
5801
|
+
let allTasksCompleted = false;
|
|
5802
|
+
let iterationCount = 0;
|
|
5803
|
+
const maxIterations = 5;
|
|
5804
|
+
const expectedTaskIds = tasks.map((task) => task.id);
|
|
5805
|
+
while (!allTasksCompleted && iterationCount < maxIterations) {
|
|
5806
|
+
iterationCount++;
|
|
5807
|
+
const currentTaskStatus = await AgentBuilderDefaults.manageTaskList({ action: "list" });
|
|
5808
|
+
const completedTasks = currentTaskStatus.tasks.filter((task) => task.status === "completed");
|
|
5809
|
+
const pendingTasks = currentTaskStatus.tasks.filter((task) => task.status !== "completed");
|
|
5810
|
+
console.log(`
|
|
5811
|
+
=== EXECUTION ITERATION ${iterationCount} ===`);
|
|
5812
|
+
console.log(`Completed tasks: ${completedTasks.length}/${expectedTaskIds.length}`);
|
|
5813
|
+
console.log(`Remaining tasks: ${pendingTasks.map((t) => t.id).join(", ")}`);
|
|
5814
|
+
allTasksCompleted = pendingTasks.length === 0;
|
|
5815
|
+
if (allTasksCompleted) {
|
|
5816
|
+
console.log("All tasks completed! Breaking execution loop.");
|
|
5817
|
+
break;
|
|
5818
|
+
}
|
|
5819
|
+
const iterationPrompt = iterationCount === 1 ? executionPrompt : `${workflowBuilderPrompts.executionAgent.iterationPrompt({
|
|
5820
|
+
completedTasks,
|
|
5821
|
+
pendingTasks,
|
|
5822
|
+
workflowName,
|
|
5823
|
+
resumeData
|
|
5824
|
+
})}
|
|
5825
|
+
|
|
5826
|
+
${workflowBuilderPrompts.validation.instructions}`;
|
|
5827
|
+
const stream = await executionAgent.streamVNext(iterationPrompt, {
|
|
5828
|
+
structuredOutput: {
|
|
5829
|
+
schema: TaskExecutionIterationInputSchema(tasks.length),
|
|
5830
|
+
model: resolveModel(runtimeContext)
|
|
5831
|
+
},
|
|
5832
|
+
...enhancedOptions
|
|
5833
|
+
});
|
|
5834
|
+
let finalMessage = "";
|
|
5835
|
+
for await (const chunk of stream.fullStream) {
|
|
5836
|
+
if (chunk.type === "text-delta") {
|
|
5837
|
+
finalMessage += chunk.payload.text;
|
|
5838
|
+
}
|
|
5839
|
+
if (chunk.type === "step-finish") {
|
|
5840
|
+
console.log(finalMessage);
|
|
5841
|
+
finalMessage = "";
|
|
5842
|
+
}
|
|
5843
|
+
if (chunk.type === "tool-result") {
|
|
5844
|
+
console.log(JSON.stringify(chunk, null, 2));
|
|
5845
|
+
}
|
|
5846
|
+
if (chunk.type === "finish") {
|
|
5847
|
+
console.log(chunk);
|
|
5848
|
+
}
|
|
5849
|
+
}
|
|
5850
|
+
await stream.consumeStream();
|
|
5851
|
+
finalResult = await stream.object;
|
|
5852
|
+
console.log(`Iteration ${iterationCount} result:`, { finalResult });
|
|
5853
|
+
if (!finalResult) {
|
|
5854
|
+
throw new Error(`No result received from agent execution on iteration ${iterationCount}`);
|
|
5855
|
+
}
|
|
5856
|
+
const postIterationTaskStatus = await AgentBuilderDefaults.manageTaskList({ action: "list" });
|
|
5857
|
+
const postCompletedTasks = postIterationTaskStatus.tasks.filter((task) => task.status === "completed");
|
|
5858
|
+
const postPendingTasks = postIterationTaskStatus.tasks.filter((task) => task.status !== "completed");
|
|
5859
|
+
allTasksCompleted = postPendingTasks.length === 0;
|
|
5860
|
+
console.log(
|
|
5861
|
+
`After iteration ${iterationCount}: ${postCompletedTasks.length}/${expectedTaskIds.length} tasks completed in taskManager`
|
|
5862
|
+
);
|
|
5863
|
+
if (finalResult.status === "needs_clarification" && finalResult.questions && finalResult.questions.length > 0) {
|
|
5864
|
+
console.log(
|
|
5865
|
+
`Agent needs clarification on iteration ${iterationCount}: ${finalResult.questions.length} questions`
|
|
5866
|
+
);
|
|
5867
|
+
break;
|
|
5868
|
+
}
|
|
5869
|
+
if (finalResult.status === "completed" && !allTasksCompleted) {
|
|
5870
|
+
console.log(
|
|
5871
|
+
`Agent claimed completion but taskManager shows pending tasks: ${postPendingTasks.map((t) => t.id).join(", ")}`
|
|
5872
|
+
);
|
|
5873
|
+
}
|
|
5874
|
+
}
|
|
5875
|
+
if (iterationCount >= maxIterations && !allTasksCompleted) {
|
|
5876
|
+
finalResult.error = `Maximum iterations (${maxIterations}) reached but not all tasks completed`;
|
|
5877
|
+
finalResult.status = "in_progress";
|
|
5878
|
+
}
|
|
5879
|
+
if (!finalResult) {
|
|
5880
|
+
throw new Error("No result received from agent execution");
|
|
5881
|
+
}
|
|
5882
|
+
if (finalResult.status === "needs_clarification" && finalResult.questions && finalResult.questions.length > 0) {
|
|
5883
|
+
console.log(`Agent needs clarification: ${finalResult.questions.length} questions`);
|
|
5884
|
+
console.log("finalResult", JSON.stringify(finalResult, null, 2));
|
|
5885
|
+
return suspend({
|
|
5886
|
+
questions: finalResult.questions,
|
|
5887
|
+
currentProgress: finalResult.progress,
|
|
5888
|
+
completedTasks: finalResult.completedTasks || [],
|
|
5889
|
+
message: finalResult.message
|
|
5890
|
+
});
|
|
5891
|
+
}
|
|
5892
|
+
const finalTaskStatus = await AgentBuilderDefaults.manageTaskList({ action: "list" });
|
|
5893
|
+
const finalCompletedTasks = finalTaskStatus.tasks.filter((task) => task.status === "completed");
|
|
5894
|
+
const finalPendingTasks = finalTaskStatus.tasks.filter((task) => task.status !== "completed");
|
|
5895
|
+
const tasksCompleted = finalCompletedTasks.length;
|
|
5896
|
+
const tasksExpected = expectedTaskIds.length;
|
|
5897
|
+
const finalAllTasksCompleted = finalPendingTasks.length === 0;
|
|
5898
|
+
const success = finalAllTasksCompleted && !finalResult.error;
|
|
5899
|
+
const message = success ? `Successfully completed workflow ${action} - all ${tasksExpected} tasks completed after ${iterationCount} iteration(s): ${finalResult.message}` : `Workflow execution finished with issues after ${iterationCount} iteration(s): ${finalResult.message}. Completed: ${tasksCompleted}/${tasksExpected} tasks`;
|
|
5900
|
+
console.log(message);
|
|
5901
|
+
const missingTasks = finalPendingTasks.map((task) => task.id);
|
|
5902
|
+
const validationErrors = [];
|
|
5903
|
+
if (finalResult.error) {
|
|
5904
|
+
validationErrors.push(finalResult.error);
|
|
5905
|
+
}
|
|
5906
|
+
if (!finalAllTasksCompleted) {
|
|
5907
|
+
validationErrors.push(
|
|
5908
|
+
`Incomplete tasks: ${missingTasks.join(", ")} (${tasksCompleted}/${tasksExpected} completed)`
|
|
5909
|
+
);
|
|
5910
|
+
}
|
|
5911
|
+
return {
|
|
5912
|
+
success,
|
|
5913
|
+
completedTasks: finalCompletedTasks.map((task) => task.id),
|
|
5914
|
+
filesModified: finalResult.filesModified || [],
|
|
5915
|
+
validationResults: {
|
|
5916
|
+
passed: success,
|
|
5917
|
+
errors: validationErrors,
|
|
5918
|
+
warnings: finalAllTasksCompleted ? [] : [`Missing ${missingTasks.length} tasks: ${missingTasks.join(", ")}`]
|
|
5919
|
+
},
|
|
5920
|
+
message,
|
|
5921
|
+
error: finalResult.error
|
|
5922
|
+
};
|
|
5923
|
+
} catch (error) {
|
|
5924
|
+
console.error("Task execution failed:", error);
|
|
5925
|
+
return {
|
|
5926
|
+
success: false,
|
|
5927
|
+
completedTasks: [],
|
|
5928
|
+
filesModified: [],
|
|
5929
|
+
validationResults: {
|
|
5930
|
+
passed: false,
|
|
5931
|
+
errors: [`Task execution failed: ${error instanceof Error ? error.message : String(error)}`],
|
|
5932
|
+
warnings: []
|
|
5933
|
+
},
|
|
5934
|
+
message: `Task execution failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
5935
|
+
error: error instanceof Error ? error.message : String(error)
|
|
5936
|
+
};
|
|
5937
|
+
}
|
|
5938
|
+
}
|
|
5939
|
+
});
|
|
5940
|
+
var workflowBuilderWorkflow = createWorkflow({
|
|
5941
|
+
id: "workflow-builder",
|
|
5942
|
+
description: "Create or edit Mastra workflows using AI-powered assistance with iterative planning",
|
|
5943
|
+
inputSchema: WorkflowBuilderInputSchema,
|
|
5944
|
+
outputSchema: WorkflowBuilderResultSchema,
|
|
5945
|
+
steps: [
|
|
5946
|
+
workflowDiscoveryStep,
|
|
5947
|
+
projectDiscoveryStep,
|
|
5948
|
+
workflowResearchStep,
|
|
5949
|
+
planningAndApprovalWorkflow,
|
|
5950
|
+
taskExecutionStep
|
|
5951
|
+
]
|
|
5952
|
+
}).then(workflowDiscoveryStep).then(projectDiscoveryStep).then(workflowResearchStep).map(async ({ getStepResult, getInitData }) => {
|
|
5953
|
+
const initData = getInitData();
|
|
5954
|
+
const discoveryResult = getStepResult(workflowDiscoveryStep);
|
|
5955
|
+
const projectResult = getStepResult(projectDiscoveryStep);
|
|
5956
|
+
return {
|
|
5957
|
+
action: initData.action,
|
|
5958
|
+
workflowName: initData.workflowName,
|
|
5959
|
+
description: initData.description,
|
|
5960
|
+
requirements: initData.requirements,
|
|
5961
|
+
discoveredWorkflows: discoveryResult.workflows,
|
|
5962
|
+
projectStructure: projectResult,
|
|
5963
|
+
// research: researchResult,
|
|
5964
|
+
research: workflowResearch,
|
|
5965
|
+
userAnswers: void 0
|
|
5966
|
+
};
|
|
5967
|
+
}).dountil(planningAndApprovalWorkflow, async ({ inputData }) => {
|
|
5968
|
+
console.log(`Sub-workflow check: approved=${inputData.approved}`);
|
|
5969
|
+
return inputData.approved === true;
|
|
5970
|
+
}).map(async ({ getStepResult, getInitData }) => {
|
|
5971
|
+
const initData = getInitData();
|
|
5972
|
+
const discoveryResult = getStepResult(workflowDiscoveryStep);
|
|
5973
|
+
const projectResult = getStepResult(projectDiscoveryStep);
|
|
5974
|
+
const subWorkflowResult = getStepResult(planningAndApprovalWorkflow);
|
|
5975
|
+
return {
|
|
5976
|
+
action: initData.action,
|
|
5977
|
+
workflowName: initData.workflowName,
|
|
5978
|
+
description: initData.description,
|
|
5979
|
+
requirements: initData.requirements,
|
|
5980
|
+
tasks: subWorkflowResult.tasks,
|
|
5981
|
+
discoveredWorkflows: discoveryResult.workflows,
|
|
5982
|
+
projectStructure: projectResult,
|
|
5983
|
+
// research: researchResult,
|
|
5984
|
+
research: workflowResearch,
|
|
5985
|
+
projectPath: initData.projectPath || process.cwd()
|
|
5986
|
+
};
|
|
5987
|
+
}).then(taskExecutionStep).commit();
|
|
5988
|
+
|
|
5989
|
+
// src/workflows/workflow-map.ts
|
|
5990
|
+
var agentBuilderWorkflows = {
|
|
5991
|
+
"merge-template": agentBuilderTemplateWorkflow,
|
|
5992
|
+
"workflow-builder": workflowBuilderWorkflow
|
|
5993
|
+
};
|
|
4220 5994 |
4221 5995 | // src/agent/index.ts
4222 5996 | var AgentBuilder = class extends Agent {
@@ -4240,9 +6014,7 @@ ${config.instructions}` : "";
4240 6014 | ...config.tools || {}
4241 6015 | };
4242 6016 | },
4243 | - workflows: {
4244 | - "merge-template": agentBuilderTemplateWorkflow
4245 | - },
6017 | + workflows: agentBuilderWorkflows,
4246 6018 | memory: new Memory({
4247 6019 | options: AgentBuilderDefaults.DEFAULT_MEMORY_CONFIG,
4248 6020 | processors: [
@@ -4345,4 +6117,6 @@ ${!options?.outputFormat || options.outputFormat === "both" ? "Provide both expl
4345 6117 | }
4346 6118 | };
4347 6119 |
4348 | - export { AgentBuilder, AgentBuilderDefaults, agentBuilderTemplateWorkflow, mergeTemplateBySlug };
6120 | + export { AgentBuilder, AgentBuilderDefaults, agentBuilderTemplateWorkflow, agentBuilderWorkflows, mergeTemplateBySlug, planningAndApprovalWorkflow, workflowBuilderWorkflow };
6121 | + //# sourceMappingURL=index.js.map
6122 | + //# sourceMappingURL=index.js.map