@leclabs/agent-flow-navigator-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +260 -0
- package/catalog/workflows/agile-task.json +130 -0
- package/catalog/workflows/bug-fix.json +153 -0
- package/catalog/workflows/context-optimization.json +130 -0
- package/catalog/workflows/feature-development.json +225 -0
- package/catalog/workflows/quick-task.json +115 -0
- package/catalog/workflows/test-coverage.json +153 -0
- package/catalog/workflows/ui-reconstruction.json +241 -0
- package/catalog.js +64 -0
- package/copier.js +179 -0
- package/diagram.js +146 -0
- package/dialog.js +63 -0
- package/engine.js +467 -0
- package/index.js +378 -0
- package/package.json +49 -0
- package/store.js +90 -0
- package/types.d.ts +133 -0
package/index.js
ADDED
@@ -0,0 +1,378 @@
#!/usr/bin/env node
/**
 * Navigator MCP Server (Stateless) v2
 *
 * Provides stateless workflow navigation for the flow plugin.
 * All task state is stored in Claude Code native /tasks via metadata.navigator.
 *
 * Tools:
 * - Navigate: Unified navigation - start, get current, or advance
 * - ListWorkflows: List available workflows
 * - Diagram: Generate mermaid diagram for workflow
 * - CopyWorkflows: Copy workflows from catalog to project
 * - ListCatalog: List workflows available in catalog
 */

import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { CallToolRequestSchema, ListToolsRequestSchema } from "@modelcontextprotocol/sdk/types.js";
import { WorkflowEngine } from "./engine.js";
import { generateDiagram } from "./diagram.js";
import { WorkflowStore, validateWorkflow } from "./store.js";
import { buildWorkflowSelectionDialog } from "./dialog.js";
import {
  generateFlowReadme,
  generateWorkflowsReadme,
  isValidWorkflowForCopy,
  computeWorkflowsToCopy,
} from "./copier.js";
import { buildWorkflowSummary, buildCatalogResponse, buildEmptyCatalogResponse } from "./catalog.js";
import { readFileSync, existsSync, readdirSync, mkdirSync, writeFileSync } from "fs";
import { dirname, join, resolve } from "path";
import { fileURLToPath } from "url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const CATALOG_PATH = join(__dirname, "catalog");

// Project root: from CLI arg (ignore flags starting with -), or current working directory
const cliArg = process.argv[2];
const PROJECT_ROOT = cliArg && !cliArg.startsWith("-") ? resolve(cliArg) : process.cwd();
const FLOW_PATH = join(PROJECT_ROOT, ".flow");
const WORKFLOWS_PATH = join(FLOW_PATH, "workflows");
const DIAGRAMS_PATH = join(FLOW_PATH, "diagrams");

const store = new WorkflowStore();
const engine = new WorkflowEngine(store);

/**
 * Load workflows from project directory structure: {id}/workflow.json
 */
function loadProjectWorkflows(dirPath) {
  if (!existsSync(dirPath)) return [];

  const loaded = [];
  const entries = readdirSync(dirPath, { withFileTypes: true });

  for (const entry of entries) {
    if (!entry.isDirectory()) continue;

    const id = entry.name;
    const workflowFile = join(dirPath, id, "workflow.json");

    if (!existsSync(workflowFile)) continue;

    try {
      const content = JSON.parse(readFileSync(workflowFile, "utf-8"));
      if (validateWorkflow(id, content)) {
        store.loadDefinition(id, content);
        loaded.push(id);
      }
    } catch (e) {
      console.error(`Error loading workflow ${id}: ${e.message}`);
    }
  }

  return loaded;
}

/**
 * Load workflows from catalog: flat {id}.json files
 */
function loadCatalogWorkflows(dirPath) {
  if (!existsSync(dirPath)) return [];

  const loaded = [];
  const files = readdirSync(dirPath).filter((f) => f.endsWith(".json"));

  for (const file of files) {
    const id = file.replace(".json", "");

    try {
      const content = JSON.parse(readFileSync(join(dirPath, file), "utf-8"));
      if (validateWorkflow(id, content)) {
        store.loadDefinition(id, content);
        loaded.push(id);
      }
    } catch (e) {
      console.error(`Error loading catalog workflow ${id}: ${e.message}`);
    }
  }

  return loaded;
}

/**
 * Load workflows: catalog first, then project overwrites (project takes precedence)
 */
function loadWorkflows() {
  const catalogPath = join(CATALOG_PATH, "workflows");
  const catalogLoaded = loadCatalogWorkflows(catalogPath);

  const projectLoaded = existsSync(WORKFLOWS_PATH) ? loadProjectWorkflows(WORKFLOWS_PATH) : [];

  // Determine which IDs came from where (project overwrites catalog)
  const fromCatalog = catalogLoaded.filter((id) => !projectLoaded.includes(id));
  const fromProject = projectLoaded;

  return {
    catalog: fromCatalog,
    project: fromProject,
    loaded: [...new Set([...catalogLoaded, ...projectLoaded])],
  };
}

// Initialize server
const server = new Server({ name: "navigator", version: "2.0.0" }, { capabilities: { tools: {} } });

/**
 * Minimal JSON response
 */
function jsonResponse(data) {
  return { content: [{ type: "text", text: JSON.stringify(data, null, 2) }] };
}

// Define tools
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      {
        name: "Navigate",
        description: "Unified workflow navigation. Start a workflow, get current state, or advance to next step.",
        inputSchema: {
          type: "object",
          properties: {
            taskFilePath: {
              type: "string",
              description: "Path to task file (for advance/current). Reads workflow state from task metadata.",
            },
            workflowType: { type: "string", description: "Workflow ID (for start only, e.g., 'feature-development')" },
            result: {
              type: "string",
              enum: ["passed", "failed"],
              description: "Step result (for advance). Omit to just get current state.",
            },
            description: { type: "string", description: "User's task description (for start)" },
          },
        },
      },
      {
        name: "ListWorkflows",
        description: "List all available workflows. Returns data only, no dialog.",
        inputSchema: {
          type: "object",
          properties: {},
        },
      },
      {
        name: "SelectWorkflow",
        description:
          "Get workflow selection dialog for user interaction. Returns workflows with multi-pane dialog for AskUserQuestion.",
        inputSchema: {
          type: "object",
          properties: {},
        },
      },
      {
        name: "Diagram",
        description: "Generate a mermaid flowchart diagram for a workflow.",
        inputSchema: {
          type: "object",
          properties: {
            workflowType: { type: "string", description: "Workflow ID to visualize" },
            currentStep: { type: "string", description: "Optional: highlight this step" },
          },
          required: ["workflowType"],
        },
      },
      {
        name: "CopyWorkflows",
        description:
          "Copy workflows from catalog to project. Creates workflow directory with workflow.json, README.md, and step instruction files.",
        inputSchema: {
          type: "object",
          properties: {
            workflowIds: {
              type: "array",
              items: { type: "string" },
              description: "Workflow IDs to copy. Empty = all.",
            },
          },
        },
      },
      {
        name: "ListCatalog",
        description: "List workflows available in the catalog.",
        inputSchema: {
          type: "object",
          properties: {},
        },
      },
    ],
  };
});

// Handle tool execution
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;

  try {
    switch (name) {
      case "Navigate": {
        const result = engine.navigate({
          taskFilePath: args.taskFilePath,
          workflowType: args.workflowType,
          result: args.result,
          description: args.description,
        });
        return jsonResponse(result);
      }

      case "ListWorkflows": {
        return jsonResponse({
          schemaVersion: 2,
          workflows: store.listWorkflows(),
        });
      }

      case "SelectWorkflow": {
        const workflows = store.listWorkflows();
        return jsonResponse(buildWorkflowSelectionDialog(workflows));
      }

      case "Diagram": {
        const wfDef = store.getDefinition(args.workflowType);
        if (!wfDef) {
          throw new Error(`Workflow '${args.workflowType}' not found`);
        }

        const markdown = generateDiagram(wfDef, args.currentStep);

        // Save diagram to file
        if (!existsSync(DIAGRAMS_PATH)) {
          mkdirSync(DIAGRAMS_PATH, { recursive: true });
        }
        const filePath = join(DIAGRAMS_PATH, `${args.workflowType}.md`);
        writeFileSync(filePath, markdown);

        return jsonResponse({ savedTo: filePath });
      }

      case "CopyWorkflows": {
        const catalogPath = join(CATALOG_PATH, "workflows");
        if (!existsSync(catalogPath)) {
          throw new Error("Catalog workflows directory not found");
        }

        if (!existsSync(WORKFLOWS_PATH)) {
          mkdirSync(WORKFLOWS_PATH, { recursive: true });
        }

        // Write README files
        writeFileSync(join(FLOW_PATH, "README.md"), generateFlowReadme());
        writeFileSync(join(WORKFLOWS_PATH, "README.md"), generateWorkflowsReadme());

        const catalogFiles = readdirSync(catalogPath).filter((f) => f.endsWith(".json"));
        const availableIds = catalogFiles.map((f) => f.replace(".json", ""));
        const workflowIds = computeWorkflowsToCopy(args.workflowIds, availableIds);

        const copied = [];
        const errors = [];

        for (const id of workflowIds) {
          const srcFile = join(catalogPath, `${id}.json`);

          if (!existsSync(srcFile)) {
            errors.push({ id, error: "not found in catalog" });
            continue;
          }

          try {
            const content = JSON.parse(readFileSync(srcFile, "utf-8"));
            if (!isValidWorkflowForCopy(content)) {
              errors.push({ id, error: "invalid schema" });
              continue;
            }

            // Create workflow directory and write workflow.json
            const workflowDir = join(WORKFLOWS_PATH, id);
            mkdirSync(workflowDir, { recursive: true });
            writeFileSync(join(workflowDir, "workflow.json"), JSON.stringify(content, null, 2));

            // Load into memory
            store.loadDefinition(id, content);
            copied.push(id);
          } catch (e) {
            errors.push({ id, error: e.message });
          }
        }

        return jsonResponse({
          schemaVersion: 2,
          copied,
          errors: errors.length > 0 ? errors : undefined,
          path: WORKFLOWS_PATH,
        });
      }

      case "ListCatalog": {
        const catalogPath = join(CATALOG_PATH, "workflows");
        if (!existsSync(catalogPath)) {
          return jsonResponse(buildEmptyCatalogResponse());
        }

        const workflows = [];
        const files = readdirSync(catalogPath).filter((f) => f.endsWith(".json"));

        for (const file of files) {
          try {
            const content = JSON.parse(readFileSync(join(catalogPath, file), "utf-8"));
            workflows.push(buildWorkflowSummary(file.replace(".json", ""), content));
          } catch {
            // Skip invalid files
          }
        }

        return jsonResponse(buildCatalogResponse(workflows));
      }

      default:
        throw new Error(`Unknown tool: ${name}`);
    }
  } catch (error) {
    return jsonResponse({
      schemaVersion: 2,
      error: error.message,
      tool: name,
      args,
    });
  }
});

/**
 * Ensure .flow/README.md exists (created on MCP server connect)
 */
function ensureFlowReadme() {
  const readmePath = join(FLOW_PATH, "README.md");
  if (!existsSync(readmePath)) {
    mkdirSync(FLOW_PATH, { recursive: true });
    writeFileSync(readmePath, generateFlowReadme());
    console.error(`  Created: ${readmePath}`);
  }
}

// Load workflows and start server
const workflowInfo = loadWorkflows();
ensureFlowReadme();

const transport = new StdioServerTransport();
await server.connect(transport);

console.error(`Navigator MCP Server v2 running (stateless)`);
console.error(`  Project: ${PROJECT_ROOT}`);
console.error(`  Workflows: ${workflowInfo.loaded.length} total`);
if (workflowInfo.catalog.length > 0) {
  console.error(`  Catalog: ${workflowInfo.catalog.join(", ")}`);
}
if (workflowInfo.project.length > 0) {
  console.error(`  Project: ${workflowInfo.project.join(", ")}`);
}
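For orientation, here is a minimal client-side sketch (not part of the package) of how the Navigate tool above could be exercised over stdio. It assumes the standard client classes from @modelcontextprotocol/sdk; the workflow ID and task description are illustrative placeholders, not values shipped with the package.

// Hypothetical client sketch: spawn the server and call its Navigate tool.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const transport = new StdioClientTransport({
  command: "npx",
  args: ["@leclabs/agent-flow-navigator-mcp", process.cwd()], // project root as CLI arg
});

const client = new Client({ name: "navigator-example", version: "0.0.0" }, { capabilities: {} });
await client.connect(transport);

// Start a workflow; the argument names mirror the Navigate input schema above.
const started = await client.callTool({
  name: "Navigate",
  arguments: {
    workflowType: "feature-development",
    description: "Add CSV export to the reports page", // illustrative description
  },
});
console.log(started.content[0].text); // JSON payload produced by jsonResponse()

await client.close();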
package/package.json
ADDED
@@ -0,0 +1,49 @@
{
  "name": "@leclabs/agent-flow-navigator-mcp",
  "version": "1.0.0",
  "description": "MCP server that navigates agents through DAG-based workflows",
  "license": "MIT",
  "author": "leclabs",
  "homepage": "https://github.com/leclabs/agent-toolkit",
  "bugs": {
    "url": "https://github.com/leclabs/agent-toolkit/issues"
  },
  "bin": {
    "agent-flow-navigator": "./index.js"
  },
  "main": "index.js",
  "type": "module",
  "scripts": {
    "start": "node index.js",
    "test": "node --test engine.test.js diagram.test.js store.test.js dialog.test.js copier.test.js catalog.test.js"
  },
  "keywords": [
    "mcp",
    "workflow",
    "flow",
    "navigator",
    "state-machine",
    "claude",
    "ai-agent"
  ],
  "repository": {
    "type": "git",
    "url": "git+https://github.com/leclabs/agent-toolkit.git",
    "directory": "packages/agent-flow-navigator-mcp"
  },
  "files": [
    "index.js",
    "engine.js",
    "diagram.js",
    "store.js",
    "dialog.js",
    "copier.js",
    "catalog.js",
    "catalog/**/*.json",
    "types.d.ts"
  ],
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.25.3",
    "zod": "^3.22.4"
  }
}
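A sketch of how the agent-flow-navigator bin entry might be registered in an MCP client configuration such as Claude Code's .mcp.json; the server name "navigator" and the use of npx are assumptions, not part of the published package.

{
  "mcpServers": {
    "navigator": {
      "command": "npx",
      "args": ["-y", "@leclabs/agent-flow-navigator-mcp", "."]
    }
  }
}

The trailing "." is the project root read by index.js (process.argv[2]); if it is omitted or looks like a flag, the server falls back to process.cwd().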
package/store.js
ADDED
@@ -0,0 +1,90 @@
/**
 * store.js - Pure workflow store module
 *
 * Manages workflow definitions in memory. Pure data structure with no I/O.
 * File loading handled by MCP server initialization.
 */

/**
 * Validate workflow schema
 * @param {string} id - Workflow identifier
 * @param {Object} content - Workflow definition
 * @returns {boolean} True if valid
 */
export function validateWorkflow(id, content) {
  if (!content.nodes || typeof content.nodes !== "object") {
    console.error(`Invalid workflow ${id}: missing 'nodes' object`);
    return false;
  }
  if (!content.edges || !Array.isArray(content.edges)) {
    console.error(`Invalid workflow ${id}: missing 'edges' array`);
    return false;
  }
  return true;
}

/**
 * In-memory workflow store (stateless - no task storage)
 */
export class WorkflowStore {
  constructor() {
    this.workflows = new Map();
  }

  /**
   * Load a workflow definition into the store
   * @param {string} id - Workflow identifier
   * @param {Object} workflow - Workflow definition
   * @returns {string} The workflow id
   */
  loadDefinition(id, workflow) {
    this.workflows.set(id, workflow);
    return id;
  }

  /**
   * Get a workflow definition by id
   * @param {string} id - Workflow identifier
   * @returns {Object|undefined} Workflow definition or undefined
   */
  getDefinition(id) {
    return this.workflows.get(id);
  }

  /**
   * List all loaded workflows with metadata
   * @returns {Array} Array of workflow summaries
   */
  listWorkflows() {
    return Array.from(this.workflows.entries()).map(([id, wf]) => ({
      id,
      name: wf.name || id,
      description: wf.description || "",
      stepCount: Object.keys(wf.nodes || {}).length,
    }));
  }

  /**
   * Check if a workflow exists
   * @param {string} id - Workflow identifier
   * @returns {boolean} True if workflow exists
   */
  has(id) {
    return this.workflows.has(id);
  }

  /**
   * Clear all workflows from the store
   */
  clear() {
    this.workflows.clear();
  }

  /**
   * Get the number of loaded workflows
   * @returns {number} Workflow count
   */
  get size() {
    return this.workflows.size;
  }
}
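A small usage sketch of the store above; the "demo" workflow definition is invented for illustration and is not one of the bundled catalog workflows.

// Sketch: load one definition and query the store.
import { WorkflowStore, validateWorkflow } from "./store.js";

const store = new WorkflowStore();

const demo = {
  nodes: {
    start: { type: "start" },
    build: { type: "task", name: "Build" },
    done: { type: "end", result: "success" },
  },
  edges: [
    { from: "start", to: "build" },
    { from: "build", to: "done", on: "passed" },
  ],
};

if (validateWorkflow("demo", demo)) {
  store.loadDefinition("demo", demo);
}

console.log(store.has("demo")); // true
console.log(store.size); // 1
console.log(store.listWorkflows());
// [{ id: "demo", name: "demo", description: "", stepCount: 3 }]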
package/types.d.ts
ADDED
@@ -0,0 +1,133 @@
/**
 * Flow Workflow Type Definitions
 *
 * Normalized schema following standard flowchart conventions:
 * - start: Entry point (exactly one)
 * - end: Exit point with result classification
 * - task: Work performed by agent
 * - gate: Review/approval checkpoint
 * - subflow: Connector to another workflow
 */

// =============================================================================
// Node Types (Discriminated Union)
// =============================================================================

export type Node = StartNode | EndNode | TaskNode | GateNode | SubflowNode;

export interface StartNode {
  type: "start";
  name?: string;
  description?: string;
}

/**
 * End node with result classification.
 * - result: How the workflow concluded (success, failure, blocked, cancelled)
 * - escalation: What action follows (hitl, alert, ticket) - optional
 */
export interface EndNode {
  type: "end";
  result: EndResult;
  escalation?: Escalation;
  name?: string;
  description?: string;
}

export type EndResult = "success" | "failure" | "blocked" | "cancelled";
export type Escalation = "hitl" | "alert" | "ticket";

/**
 * Task node - work performed by an agent.
 * - outputs: Possible outcomes (default: ["passed", "failed"])
 * - maxRetries: Retry count on failure before following "failed" edge
 */
export interface TaskNode {
  type: "task";
  name: string;
  description?: string;
  agent?: string;
  stage?: Stage;
  outputs?: string[];
  maxRetries?: number;
  config?: Record<string, unknown>;
}

/**
 * Gate node - review or approval checkpoint.
 * Functionally similar to task but semantically different.
 */
export interface GateNode {
  type: "gate";
  name: string;
  description?: string;
  agent?: string;
  stage?: Stage;
  outputs?: string[];
  maxRetries?: number;
  config?: Record<string, unknown>;
}

/**
 * Subflow node - connector to another workflow.
 */
export interface SubflowNode {
  type: "subflow";
  workflow: string;
  name?: string;
  description?: string;
  inputs?: Record<string, string>;
}

export type Stage = "planning" | "development" | "verification" | "delivery";

// =============================================================================
// Edge Definition
// =============================================================================

/**
 * Directed edge connecting two nodes.
 * - from/to: Node IDs
 * - on: Output value that triggers this edge (for conditional routing)
 * - label: Display text
 */
export interface Edge {
  from: string;
  to: string;
  on?: string;
  label?: string;
}

// =============================================================================
// Workflow Definition
// =============================================================================

export interface Workflow {
  id: string;
  name: string;
  description?: string;
  nodes: Record<string, Node>;
  edges: Edge[];
}

// =============================================================================
// Engine Types
// =============================================================================

export interface EvaluationResult {
  nextStep: string | null;
  action: EdgeAction;
  retriesUsed?: number;
  retriesRemaining?: number;
  maxRetries?: number;
  edge?: Edge;
  reason?: string;
}

export type EdgeAction =
  | "unconditional" // Edge with no 'on' condition
  | "conditional" // Edge matched 'on' condition
  | "retry" // Failed but within retry limit, looping back
  | "escalate" // Failed and exceeded retry limit
  | "no_outgoing_edges" // Terminal node (no edges)
  | "no_matching_edge"; // No edge matched the output
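For reference, a minimal workflow definition shaped to match the Workflow, Node, and Edge interfaces above (and the nodes/edges checks in store.js). The IDs, node names, and labels are illustrative, not taken from the bundled catalog.

{
  "id": "example-fix",
  "name": "Example Fix",
  "description": "Illustrative flow: implement, review, then finish or escalate.",
  "nodes": {
    "start": { "type": "start" },
    "implement": { "type": "task", "name": "Implement change", "stage": "development", "maxRetries": 1 },
    "review": { "type": "gate", "name": "Code review", "stage": "verification" },
    "done": { "type": "end", "result": "success" },
    "stuck": { "type": "end", "result": "blocked", "escalation": "hitl" }
  },
  "edges": [
    { "from": "start", "to": "implement" },
    { "from": "implement", "to": "review", "on": "passed" },
    { "from": "implement", "to": "stuck", "on": "failed" },
    { "from": "review", "to": "done", "on": "passed", "label": "approved" },
    { "from": "review", "to": "implement", "on": "failed", "label": "changes requested" }
  ]
}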