@synergenius/flow-weaver 0.10.12 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/generate-in-place.js +5 -4
- package/dist/api/inline-runtime.js +42 -0
- package/dist/cli/commands/run.d.ts +8 -0
- package/dist/cli/commands/run.js +396 -4
- package/dist/cli/flow-weaver.mjs +1867 -499
- package/dist/cli/index.js +4 -0
- package/dist/doc-metadata/extractors/mcp-tools.js +189 -0
- package/dist/doc-metadata/types.d.ts +1 -1
- package/dist/generator/unified.js +112 -35
- package/dist/mcp/debug-session.d.ts +30 -0
- package/dist/mcp/debug-session.js +25 -0
- package/dist/mcp/index.d.ts +1 -0
- package/dist/mcp/index.js +1 -0
- package/dist/mcp/server.js +2 -0
- package/dist/mcp/tools-debug.d.ts +3 -0
- package/dist/mcp/tools-debug.js +451 -0
- package/dist/mcp/workflow-executor.d.ts +2 -0
- package/dist/mcp/workflow-executor.js +12 -2
- package/dist/runtime/ExecutionContext.d.ts +19 -0
- package/dist/runtime/ExecutionContext.js +43 -0
- package/dist/runtime/checkpoint.d.ts +84 -0
- package/dist/runtime/checkpoint.js +225 -0
- package/dist/runtime/debug-controller.d.ts +110 -0
- package/dist/runtime/debug-controller.js +247 -0
- package/dist/runtime/index.d.ts +4 -0
- package/dist/runtime/index.js +2 -0
- package/docs/reference/cli-reference.md +9 -1
- package/docs/reference/debugging.md +152 -5
- package/package.json +1 -1
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
import { z } from 'zod';
|
|
2
|
+
import * as path from 'path';
|
|
3
|
+
import * as fs from 'fs';
|
|
4
|
+
import { parseWorkflow } from '../api/index.js';
|
|
5
|
+
import { getTopologicalOrder } from '../api/query.js';
|
|
6
|
+
import { DebugController } from '../runtime/debug-controller.js';
|
|
7
|
+
import { CheckpointWriter, loadCheckpoint, findLatestCheckpoint } from '../runtime/checkpoint.js';
|
|
8
|
+
import { executeWorkflowFromFile } from './workflow-executor.js';
|
|
9
|
+
import { AgentChannel } from './agent-channel.js';
|
|
10
|
+
import { storeDebugSession, getDebugSession, removeDebugSession, listDebugSessions, } from './debug-session.js';
|
|
11
|
+
import { makeToolResult, makeErrorResult } from './response-utils.js';
|
|
12
|
+
/**
|
|
13
|
+
* Helper: get execution order for a workflow file by parsing its annotations.
|
|
14
|
+
*/
|
|
15
|
+
/**
 * Parse the workflow file's annotations and return its topological node order.
 *
 * @param filePath Path to the workflow source file (resolved to absolute).
 * @param workflowName Optional function name for multi-workflow files.
 * @returns Node IDs in topological execution order.
 * @throws Error when the workflow fails to parse.
 */
async function getExecutionOrder(filePath, workflowName) {
    const absolutePath = path.resolve(filePath);
    const source = fs.readFileSync(absolutePath, 'utf8');
    const parsed = await parseWorkflow(source, { workflowName });
    if (parsed.errors.length) {
        throw new Error(`Failed to parse workflow: ${parsed.errors.join(', ')}`);
    }
    return getTopologicalOrder(parsed.ast);
}
|
|
23
|
+
/**
|
|
24
|
+
* Helper: race execution against debug pause. Returns the outcome.
|
|
25
|
+
*/
|
|
26
|
+
async function raceDebugPause(session) {
|
|
27
|
+
try {
|
|
28
|
+
const raceResult = await Promise.race([
|
|
29
|
+
session.executionPromise.then((r) => ({
|
|
30
|
+
type: 'completed',
|
|
31
|
+
result: r?.result ?? r,
|
|
32
|
+
})),
|
|
33
|
+
session.controller.onPause().then((state) => ({
|
|
34
|
+
type: 'paused',
|
|
35
|
+
state,
|
|
36
|
+
})),
|
|
37
|
+
]);
|
|
38
|
+
if (raceResult.type === 'paused') {
|
|
39
|
+
session.lastPauseState = raceResult.state;
|
|
40
|
+
}
|
|
41
|
+
return raceResult;
|
|
42
|
+
}
|
|
43
|
+
catch (err) {
|
|
44
|
+
return {
|
|
45
|
+
type: 'error',
|
|
46
|
+
message: err instanceof Error ? err.message : String(err),
|
|
47
|
+
};
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
/**
|
|
51
|
+
* Helper: clean up a debug session after completion or abort.
|
|
52
|
+
*/
|
|
53
|
+
/**
 * Dispose of a debug session: best-effort delete any temp files it tracked,
 * then drop it from the session registry. A no-op for unknown IDs.
 */
function cleanupDebugSession(debugId) {
    const session = getDebugSession(debugId);
    if (!session) {
        return;
    }
    session.tmpFiles.forEach((tmpFile) => {
        try {
            fs.unlinkSync(tmpFile);
        }
        catch {
            // Best effort: the file may already be gone.
        }
    });
    removeDebugSession(debugId);
}
|
|
67
|
+
/**
|
|
68
|
+
* Helper: find the variable key for a nodeId:portName pair from the variables map.
|
|
69
|
+
* Returns the full key (nodeId:portName:executionIndex) or null.
|
|
70
|
+
*/
|
|
71
|
+
function findVariableKey(variables, nodeId, portName, executionIndex) {
|
|
72
|
+
const prefix = `${nodeId}:${portName}:`;
|
|
73
|
+
if (executionIndex !== undefined) {
|
|
74
|
+
const key = `${prefix}${executionIndex}`;
|
|
75
|
+
return key in variables ? key : null;
|
|
76
|
+
}
|
|
77
|
+
// Find the latest execution index for this nodeId:portName
|
|
78
|
+
let latestKey = null;
|
|
79
|
+
let latestIndex = -1;
|
|
80
|
+
for (const key of Object.keys(variables)) {
|
|
81
|
+
if (key.startsWith(prefix)) {
|
|
82
|
+
const idx = parseInt(key.substring(prefix.length), 10);
|
|
83
|
+
if (idx > latestIndex) {
|
|
84
|
+
latestIndex = idx;
|
|
85
|
+
latestKey = key;
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
return latestKey;
|
|
90
|
+
}
|
|
91
|
+
/**
 * Register the step-through debugging MCP tools on the given server.
 *
 * Tools registered: fw_debug_workflow, fw_debug_step, fw_debug_continue,
 * fw_debug_inspect, fw_debug_set_variable, fw_debug_breakpoint,
 * fw_resume_from_checkpoint, fw_list_debug_sessions.
 *
 * Sessions are keyed by a generated debugId and held in the module-level
 * session store (storeDebugSession/getDebugSession). Every handler returns a
 * makeToolResult / makeErrorResult payload and never throws to the caller.
 */
export function registerDebugTools(mcp) {
    // -------------------------------------------------------------------------
    // fw_debug_workflow — Start a debug session
    // -------------------------------------------------------------------------
    mcp.tool('fw_debug_workflow', 'Start a step-through debug session for a workflow. Compiles and executes the workflow, ' +
        'pausing before the first node. Returns a debugId and the initial pause state.', {
        filePath: z.string().describe('Path to the workflow .ts file'),
        workflowName: z.string().optional().describe('Specific workflow function name (for multi-workflow files)'),
        params: z.record(z.unknown()).optional().describe('Parameters to pass to the workflow'),
        breakpoints: z.array(z.string()).optional().describe('Node IDs to set as initial breakpoints'),
        checkpoint: z.boolean().optional().describe('Enable checkpointing to disk after each node (default: false)'),
    }, async (args) => {
        try {
            // Unique, sortable-ish session key; collision risk is negligible.
            const debugId = `debug-${Date.now()}-${Math.random().toString(36).slice(2)}`;
            // Get execution order from the workflow file
            const executionOrder = await getExecutionOrder(args.filePath, args.workflowName);
            // Set up checkpoint writer if requested
            let checkpointWriter;
            if (args.checkpoint) {
                checkpointWriter = new CheckpointWriter(args.filePath, args.workflowName ?? 'default', debugId, args.params);
            }
            // Create the debug controller
            const controller = new DebugController({
                debug: true,
                checkpoint: args.checkpoint ?? false,
                checkpointWriter,
                breakpoints: args.breakpoints,
                executionOrder,
            });
            // Create agent channel in case the workflow uses waitForAgent
            const agentChannel = new AgentChannel();
            // Start execution (non-blocking: the workflow will pause at the first node)
            const execPromise = executeWorkflowFromFile(args.filePath, args.params, {
                workflowName: args.workflowName,
                includeTrace: true,
                agentChannel,
                debugController: controller,
            });
            // Store the session so follow-up tools (step/continue/inspect) can find it.
            const session = {
                debugId,
                filePath: args.filePath,
                workflowName: args.workflowName,
                controller,
                executionPromise: execPromise,
                createdAt: Date.now(),
                tmpFiles: [],
            };
            storeDebugSession(session);
            // Wait for the first pause or immediate completion
            const outcome = await raceDebugPause(session);
            if (outcome.type === 'paused') {
                return makeToolResult({
                    debugId,
                    status: 'paused',
                    state: outcome.state,
                });
            }
            if (outcome.type === 'completed') {
                // Workflow finished without pausing (e.g. no nodes); session is done.
                cleanupDebugSession(debugId);
                return makeToolResult({
                    debugId,
                    status: 'completed',
                    result: outcome.result,
                });
            }
            // Error
            cleanupDebugSession(debugId);
            return makeErrorResult('EXECUTION_ERROR', outcome.message);
        }
        catch (err) {
            return makeErrorResult('DEBUG_START_ERROR', err instanceof Error ? err.message : String(err));
        }
    });
    // -------------------------------------------------------------------------
    // fw_debug_step — Execute next node then pause
    // -------------------------------------------------------------------------
    mcp.tool('fw_debug_step', 'Step to the next node in a debug session. Executes one node then pauses again.', {
        debugId: z.string().describe('The debug session ID from fw_debug_workflow'),
    }, async (args) => {
        const session = getDebugSession(args.debugId);
        if (!session) {
            return makeErrorResult('SESSION_NOT_FOUND', `No debug session found with ID "${args.debugId}". Use fw_debug_workflow to start one.`);
        }
        try {
            // Release the pause gate for a single node, then await the next
            // pause or final completion.
            session.controller.resume({ type: 'step' });
            const outcome = await raceDebugPause(session);
            if (outcome.type === 'paused') {
                return makeToolResult({
                    status: 'paused',
                    state: outcome.state,
                });
            }
            if (outcome.type === 'completed') {
                cleanupDebugSession(args.debugId);
                return makeToolResult({
                    status: 'completed',
                    result: outcome.result,
                });
            }
            cleanupDebugSession(args.debugId);
            return makeErrorResult('EXECUTION_ERROR', outcome.message);
        }
        catch (err) {
            cleanupDebugSession(args.debugId);
            return makeErrorResult('STEP_ERROR', err instanceof Error ? err.message : String(err));
        }
    });
    // -------------------------------------------------------------------------
    // fw_debug_continue — Run to completion or breakpoint
    // -------------------------------------------------------------------------
    mcp.tool('fw_debug_continue', 'Continue execution from the current pause point. Runs to completion, or stops at the ' +
        'next breakpoint if toBreakpoint is true.', {
        debugId: z.string().describe('The debug session ID'),
        toBreakpoint: z
            .boolean()
            .optional()
            .describe('If true, pause at the next breakpoint instead of running to completion'),
    }, async (args) => {
        const session = getDebugSession(args.debugId);
        if (!session) {
            return makeErrorResult('SESSION_NOT_FOUND', `No debug session found with ID "${args.debugId}".`);
        }
        try {
            const action = args.toBreakpoint
                ? { type: 'continueToBreakpoint' }
                : { type: 'continue' };
            session.controller.resume(action);
            const outcome = await raceDebugPause(session);
            if (outcome.type === 'paused') {
                return makeToolResult({
                    status: 'paused',
                    state: outcome.state,
                });
            }
            if (outcome.type === 'completed') {
                cleanupDebugSession(args.debugId);
                return makeToolResult({
                    status: 'completed',
                    result: outcome.result,
                });
            }
            cleanupDebugSession(args.debugId);
            return makeErrorResult('EXECUTION_ERROR', outcome.message);
        }
        catch (err) {
            cleanupDebugSession(args.debugId);
            return makeErrorResult('CONTINUE_ERROR', err instanceof Error ? err.message : String(err));
        }
    });
    // -------------------------------------------------------------------------
    // fw_debug_inspect — Read current state without advancing
    // -------------------------------------------------------------------------
    mcp.tool('fw_debug_inspect', 'Inspect the current debug state without advancing execution. Returns all variables, ' +
        'or filter to a specific node.', {
        debugId: z.string().describe('The debug session ID'),
        nodeId: z.string().optional().describe('Filter to show only this node\'s variables'),
    }, async (args) => {
        const session = getDebugSession(args.debugId);
        if (!session) {
            return makeErrorResult('SESSION_NOT_FOUND', `No debug session found with ID "${args.debugId}".`);
        }
        if (!session.lastPauseState) {
            return makeErrorResult('NOT_PAUSED', 'Debug session has not paused yet. Wait for the workflow to reach a pause point.');
        }
        // Shallow copy; the variables object itself is shared with the session.
        const state = { ...session.lastPauseState };
        // Filter variables to a specific node if requested
        if (args.nodeId) {
            const prefix = `${args.nodeId}:`;
            const filtered = {};
            for (const [key, value] of Object.entries(state.variables)) {
                if (key.startsWith(prefix)) {
                    // Show as portName:executionIndex for readability
                    filtered[key.substring(prefix.length)] = value;
                }
            }
            return makeToolResult({
                nodeId: args.nodeId,
                variables: filtered,
                state: {
                    currentNodeId: state.currentNodeId,
                    phase: state.phase,
                    position: state.position,
                    completedNodes: state.completedNodes,
                },
            });
        }
        return makeToolResult({ state });
    });
    // -------------------------------------------------------------------------
    // fw_debug_set_variable — Modify a variable value
    // -------------------------------------------------------------------------
    mcp.tool('fw_debug_set_variable', 'Modify a variable value in the debug session. The new value will be used by downstream ' +
        'nodes when execution continues.', {
        debugId: z.string().describe('The debug session ID'),
        nodeId: z.string().describe('The node that produced the variable'),
        portName: z.string().describe('The output port name'),
        value: z.unknown().describe('The new value to set'),
        executionIndex: z.number().optional().describe('Execution index (defaults to the latest)'),
    }, async (args) => {
        const session = getDebugSession(args.debugId);
        if (!session) {
            return makeErrorResult('SESSION_NOT_FOUND', `No debug session found with ID "${args.debugId}".`);
        }
        if (!session.lastPauseState) {
            return makeErrorResult('NOT_PAUSED', 'Cannot modify variables when the session is not paused.');
        }
        // Find the variable key (latest execution index unless one is given).
        const key = findVariableKey(session.lastPauseState.variables, args.nodeId, args.portName, args.executionIndex);
        if (!key) {
            return makeErrorResult('VARIABLE_NOT_FOUND', `No variable found for ${args.nodeId}.${args.portName}` +
                (args.executionIndex !== undefined ? `[${args.executionIndex}]` : '') +
                '. Check fw_debug_inspect to see available variables.');
        }
        // Queue the modification (applied before the next node executes)
        session.controller.setVariable(key, args.value);
        // Update the lastPauseState to reflect the modification, so a
        // subsequent fw_debug_inspect shows the new value immediately.
        session.lastPauseState.variables[key] = args.value;
        return makeToolResult({
            modified: key,
            value: args.value,
        });
    });
    // -------------------------------------------------------------------------
    // fw_debug_breakpoint — Manage breakpoints
    // -------------------------------------------------------------------------
    mcp.tool('fw_debug_breakpoint', 'Add, remove, or list breakpoints in a debug session. Breakpoints cause execution to ' +
        'pause when running with fw_debug_continue(toBreakpoint: true).', {
        debugId: z.string().describe('The debug session ID'),
        action: z.enum(['add', 'remove', 'list']).describe('Action to perform'),
        nodeId: z.string().optional().describe('Node ID for add/remove (not needed for list)'),
    }, async (args) => {
        const session = getDebugSession(args.debugId);
        if (!session) {
            return makeErrorResult('SESSION_NOT_FOUND', `No debug session found with ID "${args.debugId}".`);
        }
        if (args.action === 'add') {
            if (!args.nodeId) {
                return makeErrorResult('MISSING_PARAM', 'nodeId is required to add a breakpoint');
            }
            session.controller.addBreakpoint(args.nodeId);
        }
        else if (args.action === 'remove') {
            if (!args.nodeId) {
                return makeErrorResult('MISSING_PARAM', 'nodeId is required to remove a breakpoint');
            }
            session.controller.removeBreakpoint(args.nodeId);
        }
        // 'list' falls through: every action returns the current breakpoint set.
        return makeToolResult({
            breakpoints: session.controller.getBreakpoints(),
        });
    });
    // -------------------------------------------------------------------------
    // fw_resume_from_checkpoint — Resume a crashed workflow
    // -------------------------------------------------------------------------
    mcp.tool('fw_resume_from_checkpoint', 'Resume a workflow from a checkpoint file written after a crash. Skips already-completed ' +
        'nodes and re-runs from the last checkpoint position.', {
        filePath: z.string().describe('Path to the workflow .ts file'),
        checkpointFile: z
            .string()
            .optional()
            .describe('Path to the checkpoint file. If omitted, auto-detects the latest.'),
        workflowName: z
            .string()
            .optional()
            .describe('Workflow function name (for multi-workflow files)'),
        debug: z
            .boolean()
            .optional()
            .describe('Enter step-through debug mode at the resume point (default: false)'),
    }, async (args) => {
        try {
            // Find checkpoint file
            const checkpointPath = args.checkpointFile ?? findLatestCheckpoint(args.filePath, args.workflowName);
            if (!checkpointPath) {
                return makeErrorResult('NO_CHECKPOINT', `No checkpoint file found for ${args.filePath}. ` +
                    'Checkpoints are created when running with checkpoint: true.');
            }
            // Load and validate checkpoint
            const { data, stale, rerunNodes, skipNodes } = loadCheckpoint(checkpointPath, args.filePath);
            // Nodes restored from the checkpoint minus those that must re-run
            // (their outputs weren't serializable).
            const skippedCount = data.completedNodes.length - rerunNodes.length;
            // Create debug controller with skip nodes from checkpoint
            const controller = new DebugController({
                debug: args.debug ?? false,
                checkpoint: true,
                checkpointWriter: new CheckpointWriter(args.filePath, data.workflowName, `resume-${Date.now()}`, data.params),
                executionOrder: data.executionOrder,
                skipNodes,
            });
            const agentChannel = new AgentChannel();
            // Execute with the skip nodes configured
            const execPromise = executeWorkflowFromFile(args.filePath, data.params, {
                workflowName: data.workflowName,
                includeTrace: true,
                agentChannel,
                debugController: controller,
            });
            // If debug mode, handle like fw_debug_workflow
            if (args.debug) {
                const debugId = `debug-resume-${Date.now()}-${Math.random().toString(36).slice(2)}`;
                const session = {
                    debugId,
                    filePath: args.filePath,
                    workflowName: data.workflowName,
                    controller,
                    executionPromise: execPromise,
                    createdAt: Date.now(),
                    tmpFiles: [],
                };
                storeDebugSession(session);
                const outcome = await raceDebugPause(session);
                if (outcome.type === 'paused') {
                    return makeToolResult({
                        debugId,
                        status: 'paused',
                        resumedFrom: checkpointPath,
                        skippedNodes: skippedCount,
                        ...(rerunNodes.length > 0 && { rerunNodes }),
                        ...(stale && { warning: 'Workflow file has changed since checkpoint was written.' }),
                        state: outcome.state,
                    });
                }
                if (outcome.type === 'completed') {
                    cleanupDebugSession(debugId);
                    // NOTE(review): unlike the non-debug completion below, this
                    // response omits the 'stale' warning — confirm intentional.
                    return makeToolResult({
                        status: 'completed',
                        resumedFrom: checkpointPath,
                        skippedNodes: skippedCount,
                        ...(rerunNodes.length > 0 && { rerunNodes }),
                        result: outcome.result,
                    });
                }
                cleanupDebugSession(debugId);
                return makeErrorResult('EXECUTION_ERROR', outcome.message);
            }
            // Non-debug mode: run to completion
            const result = await execPromise;
            // Clean up the checkpoint file on successful completion.
            // NOTE(review): a fresh writer with an empty runId is constructed
            // just to call cleanup() — presumably cleanup() is runId-independent
            // (targets the workflow's .fw-checkpoints entries); verify against
            // CheckpointWriter.cleanup.
            const writer = new CheckpointWriter(args.filePath, data.workflowName, '', {});
            writer.cleanup();
            return makeToolResult({
                status: 'completed',
                resumedFrom: checkpointPath,
                skippedNodes: skippedCount,
                ...(rerunNodes.length > 0 && { rerunNodes }),
                ...(stale && { warning: 'Workflow file has changed since checkpoint was written.' }),
                result: result?.result ?? result,
            });
        }
        catch (err) {
            return makeErrorResult('RESUME_ERROR', err instanceof Error ? err.message : String(err));
        }
    });
    // -------------------------------------------------------------------------
    // fw_list_debug_sessions — List active debug sessions
    // -------------------------------------------------------------------------
    mcp.tool('fw_list_debug_sessions', 'List all active debug sessions.', {}, async () => {
        return makeToolResult(listDebugSessions());
    });
}
|
|
451
|
+
//# sourceMappingURL=tools-debug.js.map
|
|
@@ -4,6 +4,7 @@
|
|
|
4
4
|
*/
|
|
5
5
|
import type { FwMockConfig } from '../built-in-nodes/mock-types.js';
|
|
6
6
|
import type { AgentChannel } from './agent-channel.js';
|
|
7
|
+
import type { DebugController } from '../runtime/debug-controller.js';
|
|
7
8
|
/** A single trace event captured during workflow execution. */
|
|
8
9
|
export interface ExecutionTraceEvent {
|
|
9
10
|
/** The event type (e.g. "NODE_STARTED", "NODE_COMPLETED"). */
|
|
@@ -69,6 +70,7 @@ export declare function executeWorkflowFromFile(filePath: string, params?: Recor
|
|
|
69
70
|
includeTrace?: boolean;
|
|
70
71
|
mocks?: FwMockConfig;
|
|
71
72
|
agentChannel?: AgentChannel;
|
|
73
|
+
debugController?: DebugController;
|
|
72
74
|
onEvent?: (event: ExecutionTraceEvent) => void;
|
|
73
75
|
}): Promise<ExecuteWorkflowResult>;
|
|
74
76
|
/** Compute a concise summary from raw trace events. */
|
|
@@ -44,13 +44,18 @@ export async function executeWorkflowFromFile(filePath, params, options) {
|
|
|
44
44
|
// Discover all workflows in the file
|
|
45
45
|
const source = fs.readFileSync(resolvedPath, 'utf8');
|
|
46
46
|
const allWorkflows = getAvailableWorkflows(source);
|
|
47
|
-
// Compile each workflow in-place so all function bodies are generated
|
|
47
|
+
// Compile each workflow in-place so all function bodies are generated.
|
|
48
|
+
// Debug controller requires dev mode (production: false) so that
|
|
49
|
+
// __ctrl__.beforeNode/afterNode hooks are emitted in generated code.
|
|
50
|
+
const production = options?.debugController
|
|
51
|
+
? false
|
|
52
|
+
: (options?.production ?? !includeTrace);
|
|
48
53
|
for (const wf of allWorkflows) {
|
|
49
54
|
await compileWorkflow(tmpTsFile, {
|
|
50
55
|
write: true,
|
|
51
56
|
inPlace: true,
|
|
52
57
|
parse: { workflowName: wf.functionName },
|
|
53
|
-
generate: { production
|
|
58
|
+
generate: { production },
|
|
54
59
|
});
|
|
55
60
|
}
|
|
56
61
|
// Inject debugger binding: replace the TypeScript-only `declare const`
|
|
@@ -93,6 +98,10 @@ export async function executeWorkflowFromFile(filePath, params, options) {
|
|
|
93
98
|
if (options?.agentChannel) {
|
|
94
99
|
globalThis.__fw_agent_channel__ = options.agentChannel;
|
|
95
100
|
}
|
|
101
|
+
// Set debug controller for step-through debugging and checkpoint/resume
|
|
102
|
+
if (options?.debugController) {
|
|
103
|
+
globalThis.__fw_debug_controller__ = options.debugController;
|
|
104
|
+
}
|
|
96
105
|
// Dynamic import using file:// URL for cross-platform compatibility
|
|
97
106
|
// (Windows paths like C:\... break with bare import() — "Received protocol 'c:'")
|
|
98
107
|
const mod = await import(pathToFileURL(tmpFile).href);
|
|
@@ -127,6 +136,7 @@ export async function executeWorkflowFromFile(filePath, params, options) {
|
|
|
127
136
|
delete globalThis.__fw_mocks__;
|
|
128
137
|
delete globalThis.__fw_workflow_registry__;
|
|
129
138
|
delete globalThis.__fw_agent_channel__;
|
|
139
|
+
delete globalThis.__fw_debug_controller__;
|
|
130
140
|
// Clean up temp files
|
|
131
141
|
try {
|
|
132
142
|
fs.unlinkSync(tmpFile);
|
|
@@ -143,6 +143,25 @@ export declare class GeneratedExecutionContext {
|
|
|
143
143
|
status: 'SUCCEEDED' | 'FAILED' | 'CANCELLED';
|
|
144
144
|
result?: unknown;
|
|
145
145
|
}): void;
|
|
146
|
+
/**
 * Serialize the execution context state for checkpointing.
 * Function values are resolved to concrete values before serialization.
 *
 * @returns A plain-object snapshot of variables, execution info, the global
 *   execution counter, and per-node execution counts, suitable for
 *   persisting via the checkpoint layer.
 */
serialize(): {
    variables: Record<string, unknown>;
    executions: Record<string, ExecutionInfo>;
    executionCounter: number;
    nodeExecutionCounts: Record<string, number>;
};
|
|
156
|
+
/**
 * Restore execution context state from a checkpoint.
 * Overwrites the context's current variables, execution info, counter, and
 * per-node counts with the snapshot produced by {@link serialize}.
 *
 * @param data A snapshot previously returned by serialize() (possibly read
 *   back from a checkpoint file).
 */
restore(data: {
    variables: Record<string, unknown>;
    executions: Record<string, ExecutionInfo>;
    executionCounter: number;
    nodeExecutionCounts: Record<string, number>;
}): void;
|
|
146
165
|
}
|
|
147
166
|
export {};
|
|
148
167
|
//# sourceMappingURL=ExecutionContext.d.ts.map
|
|
@@ -233,5 +233,48 @@ export class GeneratedExecutionContext {
|
|
|
233
233
|
});
|
|
234
234
|
}
|
|
235
235
|
}
|
|
236
|
+
/**
|
|
237
|
+
* Serialize the execution context state for checkpointing.
|
|
238
|
+
* Function values are resolved to concrete values before serialization.
|
|
239
|
+
*/
|
|
240
|
+
serialize() {
|
|
241
|
+
const vars = {};
|
|
242
|
+
for (const [key, value] of this.variables) {
|
|
243
|
+
if (typeof value === 'function') {
|
|
244
|
+
try {
|
|
245
|
+
vars[key] = value();
|
|
246
|
+
}
|
|
247
|
+
catch {
|
|
248
|
+
vars[key] = value; // Let the checkpoint layer handle the marker
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
else {
|
|
252
|
+
vars[key] = value;
|
|
253
|
+
}
|
|
254
|
+
}
|
|
255
|
+
const execs = {};
|
|
256
|
+
for (const [key, info] of this.executions) {
|
|
257
|
+
execs[key] = { ...info };
|
|
258
|
+
}
|
|
259
|
+
const nodeCounts = {};
|
|
260
|
+
for (const [key, count] of this.nodeExecutionIndices) {
|
|
261
|
+
nodeCounts[key] = count;
|
|
262
|
+
}
|
|
263
|
+
return {
|
|
264
|
+
variables: vars,
|
|
265
|
+
executions: execs,
|
|
266
|
+
executionCounter: this.executionCounter,
|
|
267
|
+
nodeExecutionCounts: nodeCounts,
|
|
268
|
+
};
|
|
269
|
+
}
|
|
270
|
+
/**
|
|
271
|
+
* Restore execution context state from a checkpoint.
|
|
272
|
+
*/
|
|
273
|
+
restore(data) {
|
|
274
|
+
this.variables = new Map(Object.entries(data.variables));
|
|
275
|
+
this.executions = new Map(Object.entries(data.executions));
|
|
276
|
+
this.executionCounter = data.executionCounter;
|
|
277
|
+
this.nodeExecutionIndices = new Map(Object.entries(data.nodeExecutionCounts));
|
|
278
|
+
}
|
|
236
279
|
}
|
|
237
280
|
//# sourceMappingURL=ExecutionContext.js.map
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
/**
 * Checkpoint serialization for crash recovery.
 *
 * Writes workflow state to disk after each node completes. If the process
 * crashes, the checkpoint file persists and can be used to resume execution
 * from the last completed node.
 *
 * Checkpoint files live in .fw-checkpoints/ next to the workflow file and
 * are auto-deleted after successful workflow completion.
 */
import type { GeneratedExecutionContext, ExecutionInfo } from './ExecutionContext.js';
/** Marker stored in place of values that couldn't be serialized */
export interface UnserializableMarker {
    /** Discriminant tag identifying the marker */
    __fw_unserializable__: true;
    /** Node whose output could not be serialized */
    nodeId: string;
    /** Output port that produced the value */
    portName: string;
    /** Human-readable explanation of why serialization failed */
    reason: string;
}
/** On-disk shape of a single checkpoint file. */
export interface CheckpointData {
    /** Format version for forward compatibility */
    version: 1;
    /** SHA-256 hash of the workflow source file */
    workflowHash: string;
    /** Workflow function name */
    workflowName: string;
    /** Original file path */
    filePath: string;
    /** Input parameters the workflow was called with */
    params: Record<string, unknown>;
    /** ISO 8601 timestamp */
    timestamp: string;
    /** Node IDs in completion order */
    completedNodes: string[];
    /** Full topological execution order */
    executionOrder: string[];
    /** Current position in execution order */
    position: number;
    /** Serialized variables: key -> value */
    variables: Record<string, unknown>;
    /** Execution info for completed nodes */
    executions: Record<string, ExecutionInfo>;
    /** Execution counter value */
    executionCounter: number;
    /** Per-node execution counts */
    nodeExecutionCounts: Record<string, number>;
    /** Nodes whose outputs couldn't be fully serialized */
    unsafeNodes: string[];
}
/** Writes checkpoint files for one workflow run. */
export declare class CheckpointWriter {
    private dir;
    private filePath;
    private workflowName;
    private runId;
    private params;
    private workflowHash;
    private checkpointPath;
    private writeLock;
    constructor(workflowFilePath: string, workflowName: string, runId: string, params?: Record<string, unknown>);
    /**
     * Write a checkpoint after a node completes. Uses a write lock so
     * concurrent calls from parallel nodes are serialized.
     */
    write(completedNodes: string[], executionOrder: string[], position: number, ctx: GeneratedExecutionContext): Promise<void>;
    /** Clean up checkpoint file after successful completion */
    cleanup(): void;
    /** Path of the checkpoint file this writer targets. */
    getCheckpointPath(): string;
    private _writeCheckpoint;
}
/**
 * Load a checkpoint file and validate it against the current workflow.
 * Returns the checkpoint data and a list of nodes that need to be re-run
 * (because their outputs weren't serializable).
 * `stale` is true when the workflow file no longer matches the checkpoint;
 * `skipNodes` maps node IDs to their restored outputs.
 */
export declare function loadCheckpoint(checkpointPath: string, workflowFilePath?: string): {
    data: CheckpointData;
    stale: boolean;
    rerunNodes: string[];
    skipNodes: Map<string, Record<string, unknown>>;
};
/**
 * Find the most recent checkpoint file for a workflow.
 * @returns The checkpoint file path, or null when none exists.
 */
export declare function findLatestCheckpoint(workflowFilePath: string, workflowName?: string): string | null;
//# sourceMappingURL=checkpoint.d.ts.map
|