@lhi/n8m 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +105 -6
- package/dist/agentic/graph.d.ts +50 -0
- package/dist/agentic/graph.js +0 -2
- package/dist/agentic/nodes/architect.d.ts +5 -0
- package/dist/agentic/nodes/architect.js +8 -22
- package/dist/agentic/nodes/engineer.d.ts +15 -0
- package/dist/agentic/nodes/engineer.js +25 -4
- package/dist/agentic/nodes/qa.d.ts +1 -0
- package/dist/agentic/nodes/qa.js +280 -45
- package/dist/agentic/nodes/reviewer.d.ts +4 -0
- package/dist/agentic/nodes/reviewer.js +71 -13
- package/dist/agentic/nodes/supervisor.js +2 -3
- package/dist/agentic/state.d.ts +1 -0
- package/dist/agentic/state.js +4 -0
- package/dist/commands/create.js +37 -3
- package/dist/commands/doc.js +1 -1
- package/dist/commands/fixture.d.ts +12 -0
- package/dist/commands/fixture.js +258 -0
- package/dist/commands/test.d.ts +63 -4
- package/dist/commands/test.js +1179 -90
- package/dist/fixture-schema.json +162 -0
- package/dist/resources/node-definitions-fallback.json +185 -8
- package/dist/resources/node-test-hints.json +188 -0
- package/dist/resources/workflow-test-fixtures.json +42 -0
- package/dist/services/ai.service.d.ts +42 -0
- package/dist/services/ai.service.js +271 -21
- package/dist/services/node-definitions.service.d.ts +1 -0
- package/dist/services/node-definitions.service.js +4 -11
- package/dist/utils/config.js +2 -0
- package/dist/utils/fixtureManager.d.ts +28 -0
- package/dist/utils/fixtureManager.js +41 -0
- package/dist/utils/n8nClient.d.ts +27 -0
- package/dist/utils/n8nClient.js +169 -5
- package/dist/utils/spinner.d.ts +17 -0
- package/dist/utils/spinner.js +52 -0
- package/oclif.manifest.json +49 -1
- package/package.json +2 -2
package/dist/commands/test.js
CHANGED
|
@@ -5,10 +5,15 @@ import { N8nClient } from '../utils/n8nClient.js';
|
|
|
5
5
|
import { ConfigManager } from '../utils/config.js';
|
|
6
6
|
import { AIService } from '../services/ai.service.js';
|
|
7
7
|
import { DocService } from '../services/doc.service.js';
|
|
8
|
+
import { Spinner } from '../utils/spinner.js';
|
|
8
9
|
import { runAgenticWorkflow, graph, resumeAgenticWorkflow } from '../agentic/graph.js';
|
|
10
|
+
import { FixtureManager } from '../utils/fixtureManager.js';
|
|
9
11
|
import * as path from 'path';
|
|
10
12
|
import * as fs from 'fs/promises';
|
|
11
|
-
import { existsSync } from 'fs';
|
|
13
|
+
import { existsSync, readFileSync } from 'fs';
|
|
14
|
+
import { fileURLToPath } from 'url';
|
|
15
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
16
|
+
const __dirname = path.dirname(__filename);
|
|
12
17
|
export default class Test extends Command {
|
|
13
18
|
static args = {
|
|
14
19
|
workflow: Args.string({
|
|
@@ -41,6 +46,10 @@ export default class Test extends Command {
|
|
|
41
46
|
default: false,
|
|
42
47
|
description: 'Generate 3 diverse AI test scenarios (happy path, edge case, error)',
|
|
43
48
|
}),
|
|
49
|
+
fixture: Flags.string({
|
|
50
|
+
char: 'f',
|
|
51
|
+
description: 'Path to a fixture JSON file to use for offline testing',
|
|
52
|
+
}),
|
|
44
53
|
};
|
|
45
54
|
async run() {
|
|
46
55
|
const { args, flags } = await this.parse(Test);
|
|
@@ -57,20 +66,10 @@ export default class Test extends Command {
|
|
|
57
66
|
}
|
|
58
67
|
const client = new N8nClient({ apiUrl: n8nUrl, apiKey: n8nKey });
|
|
59
68
|
const aiService = AIService.getInstance();
|
|
60
|
-
//
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
if (validNodeTypes.length > 0) {
|
|
65
|
-
this.log(theme.success(`✔ Loaded ${validNodeTypes.length} valid node types.`));
|
|
66
|
-
}
|
|
67
|
-
else {
|
|
68
|
-
this.log(theme.warn('⚠ Could not load node types. Validation/Shimming will be limited.'));
|
|
69
|
-
}
|
|
70
|
-
}
|
|
71
|
-
catch (e) {
|
|
72
|
-
this.log(theme.warn(`⚠ Failed to fetch node types: ${e.message}`));
|
|
73
|
-
}
|
|
69
|
+
// Node-type validation uses the local fallback definitions bundled with the
|
|
70
|
+
// project. We don't attempt a live fetch from the n8n instance because the
|
|
71
|
+
// /node-types endpoint is not available on all versions.
|
|
72
|
+
const validNodeTypes = [];
|
|
74
73
|
let createdWorkflowId = null;
|
|
75
74
|
const deployedDefinitions = new Map(); // TempId -> Original JSON (for patching)
|
|
76
75
|
let globalSuccess = false;
|
|
@@ -356,86 +355,150 @@ export default class Test extends Command {
|
|
|
356
355
|
patchNodes(workflowData.nodes);
|
|
357
356
|
}
|
|
358
357
|
}
|
|
359
|
-
// ---
|
|
360
|
-
// --- AGENTIC WORKFLOW EXECUTION ---
|
|
358
|
+
// --- AGENTIC VALIDATION ---
|
|
361
359
|
this.log(theme.subHeader('AGENTIC VALIDATION'));
|
|
362
|
-
this.log(theme.agent("Initializing Agentic Workflow to validate/repair this workflow..."));
|
|
363
|
-
const goal = `Validate and fix the workflow named "${workflowName}"`;
|
|
364
360
|
let testScenarios = [];
|
|
365
361
|
if (flags['ai-scenarios']) {
|
|
362
|
+
const goalForScenarios = `Validate and fix the workflow named "${workflowName}"`;
|
|
366
363
|
this.log(theme.agent("Generating AI test scenarios..."));
|
|
367
|
-
testScenarios = await aiService.generateTestScenarios(workflowData,
|
|
364
|
+
testScenarios = await aiService.generateTestScenarios(workflowData, goalForScenarios);
|
|
368
365
|
this.log(theme.muted(`Generated ${testScenarios.length} scenarios.`));
|
|
369
366
|
}
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
// Check if paused
|
|
386
|
-
const snapshot = await graph.getState({ configurable: { thread_id: ephemeralThreadId } });
|
|
387
|
-
if (snapshot.next && snapshot.next.length > 0) {
|
|
388
|
-
if (flags.headless) {
|
|
389
|
-
this.log(theme.info("Headless mode active. Auto-resuming..."));
|
|
390
|
-
result = await resumeAgenticWorkflow(ephemeralThreadId);
|
|
367
|
+
if (rootRealTargetId) {
|
|
368
|
+
// REMOTE WORKFLOW: test against the real instance workflow — no ephemeral copy, no shim.
|
|
369
|
+
// Credentials are already configured on the instance; no need to strip them.
|
|
370
|
+
const fixtureManager = new FixtureManager();
|
|
371
|
+
const validateOnly = flags['validate-only'];
|
|
372
|
+
let directResult;
|
|
373
|
+
const fixtureFlagPath = flags['fixture'];
|
|
374
|
+
if (fixtureFlagPath) {
|
|
375
|
+
// --fixture flag: load from explicit path, run offline immediately (no prompt)
|
|
376
|
+
const fixture = fixtureManager.loadFromPath(fixtureFlagPath);
|
|
377
|
+
if (!fixture) {
|
|
378
|
+
this.log(theme.fail(`Could not load fixture from: ${fixtureFlagPath}`));
|
|
379
|
+
return;
|
|
380
|
+
}
|
|
381
|
+
directResult = await this.testWithFixture(fixture, workflowName, aiService);
|
|
391
382
|
}
|
|
392
383
|
else {
|
|
393
|
-
const
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
}
|
|
399
|
-
|
|
400
|
-
|
|
384
|
+
const capturedDate = fixtureManager.getCapturedDate(rootRealTargetId);
|
|
385
|
+
if (capturedDate && !validateOnly) {
|
|
386
|
+
const dateStr = capturedDate.toLocaleString('en-US', {
|
|
387
|
+
year: 'numeric', month: 'short', day: 'numeric',
|
|
388
|
+
hour: '2-digit', minute: '2-digit',
|
|
389
|
+
});
|
|
390
|
+
const { useFixture } = await inquirer.prompt([{
|
|
391
|
+
type: 'confirm',
|
|
392
|
+
name: 'useFixture',
|
|
393
|
+
message: `Fixture found from ${dateStr}. Run offline?`,
|
|
394
|
+
default: true,
|
|
395
|
+
}]);
|
|
396
|
+
if (useFixture) {
|
|
397
|
+
const fixture = fixtureManager.load(rootRealTargetId);
|
|
398
|
+
directResult = await this.testWithFixture(fixture, workflowName, aiService);
|
|
399
|
+
}
|
|
400
|
+
else {
|
|
401
|
+
directResult = await this.testRemoteWorkflowDirectly(rootRealTargetId, workflowData, workflowName, client, aiService, n8nUrl, testScenarios);
|
|
402
|
+
if (directResult.passed && !validateOnly) {
|
|
403
|
+
await this.offerSaveFixture(fixtureManager, rootRealTargetId, workflowName, directResult.finalWorkflow ?? workflowData, directResult.lastExecution);
|
|
404
|
+
}
|
|
405
|
+
}
|
|
406
|
+
}
|
|
407
|
+
else if (capturedDate && validateOnly) {
|
|
408
|
+
// validate-only + fixture: use fixture silently (no prompt)
|
|
409
|
+
const fixture = fixtureManager.load(rootRealTargetId);
|
|
410
|
+
directResult = await this.testWithFixture(fixture, workflowName, aiService);
|
|
401
411
|
}
|
|
402
412
|
else {
|
|
403
|
-
this.
|
|
404
|
-
|
|
413
|
+
directResult = await this.testRemoteWorkflowDirectly(rootRealTargetId, workflowData, workflowName, client, aiService, n8nUrl, testScenarios);
|
|
414
|
+
if (directResult.passed && !validateOnly) {
|
|
415
|
+
await this.offerSaveFixture(fixtureManager, rootRealTargetId, workflowName, directResult.finalWorkflow ?? workflowData, directResult.lastExecution);
|
|
416
|
+
}
|
|
405
417
|
}
|
|
406
418
|
}
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
// The graph result uses the same structure as Engineer: { workflows: [...] } or just workflowJson object
|
|
414
|
-
let fixedWorkflow = result.workflowJson;
|
|
415
|
-
// If it's wrapped in a workflows array (Multi-workflow support), take the first one for now
|
|
416
|
-
if (result.workflowJson.workflows && Array.isArray(result.workflowJson.workflows)) {
|
|
417
|
-
fixedWorkflow = result.workflowJson.workflows[0];
|
|
418
|
-
}
|
|
419
|
-
const finalName = fixedWorkflow.name || workflowName;
|
|
420
|
-
deployedDefinitions.set('agentic-result', {
|
|
421
|
-
name: finalName,
|
|
422
|
-
data: fixedWorkflow,
|
|
419
|
+
if (directResult.passed) {
|
|
420
|
+
globalSuccess = true;
|
|
421
|
+
// Use finalWorkflow from the result (already in-memory — no extra API call).
|
|
422
|
+
deployedDefinitions.set('remote-result', {
|
|
423
|
+
name: workflowName,
|
|
424
|
+
data: directResult.finalWorkflow ?? workflowData,
|
|
423
425
|
type: 'root',
|
|
424
|
-
realId: rootRealTargetId
|
|
426
|
+
realId: rootRealTargetId,
|
|
425
427
|
});
|
|
426
428
|
}
|
|
429
|
+
else {
|
|
430
|
+
this.log(theme.fail(`Validation failed — ${directResult.errors.length} issue(s).`));
|
|
431
|
+
directResult.errors.forEach((e) => this.log(theme.muted(` ↳ ${e}`)));
|
|
432
|
+
}
|
|
427
433
|
}
|
|
428
434
|
else {
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
435
|
+
// LOCAL FILE: use the agentic graph for structural validation and repair.
|
|
436
|
+
const goal = `Validate and fix the workflow named "${workflowName}"`;
|
|
437
|
+
const initialState = {
|
|
438
|
+
userGoal: goal,
|
|
439
|
+
messages: [],
|
|
440
|
+
validationErrors: [],
|
|
441
|
+
workflowJson: workflowData,
|
|
442
|
+
availableNodeTypes: validNodeTypes,
|
|
443
|
+
testScenarios: testScenarios
|
|
444
|
+
};
|
|
445
|
+
const ephemeralThreadId = `test-${Date.now()}`;
|
|
446
|
+
let result = await runAgenticWorkflow(goal, initialState, ephemeralThreadId);
|
|
447
|
+
// HITL Handling: Loop until graph reaches END (handles self-correction cycles)
|
|
448
|
+
// Each repair iteration creates a new interrupt before engineer, so we need to keep resuming.
|
|
449
|
+
const MAX_RESUMES = 8; // architect + 2 engineers + reviewer + fix loop iterations
|
|
450
|
+
for (let i = 0; i < MAX_RESUMES; i++) {
|
|
451
|
+
const snapshot = await graph.getState({ configurable: { thread_id: ephemeralThreadId } });
|
|
452
|
+
if (!snapshot.next || snapshot.next.length === 0)
|
|
453
|
+
break; // Graph reached END
|
|
454
|
+
if (flags.headless) {
|
|
455
|
+
result = await resumeAgenticWorkflow(ephemeralThreadId);
|
|
456
|
+
}
|
|
457
|
+
else {
|
|
458
|
+
const nodeLabel = snapshot.next.join(', ');
|
|
459
|
+
const isQa = nodeLabel.includes('qa');
|
|
460
|
+
const { resume } = await inquirer.prompt([{
|
|
461
|
+
type: 'confirm',
|
|
462
|
+
name: 'resume',
|
|
463
|
+
message: isQa ? 'Reviewer passed blueprint. Proceed to QA Execution?' : `Paused before: ${nodeLabel}. Continue?`,
|
|
464
|
+
default: true
|
|
465
|
+
}]);
|
|
466
|
+
if (resume) {
|
|
467
|
+
result = await resumeAgenticWorkflow(ephemeralThreadId);
|
|
468
|
+
}
|
|
469
|
+
else {
|
|
470
|
+
this.log(theme.warn("Test aborted by user."));
|
|
471
|
+
return;
|
|
472
|
+
}
|
|
473
|
+
}
|
|
474
|
+
}
|
|
475
|
+
if (result.validationStatus === 'passed') {
|
|
476
|
+
globalSuccess = true;
|
|
477
|
+
if (result.workflowJson) {
|
|
478
|
+
let fixedWorkflow = result.workflowJson;
|
|
479
|
+
if (result.workflowJson.workflows && Array.isArray(result.workflowJson.workflows)) {
|
|
480
|
+
fixedWorkflow = result.workflowJson.workflows[0];
|
|
481
|
+
}
|
|
482
|
+
const finalName = fixedWorkflow.name || workflowName;
|
|
483
|
+
deployedDefinitions.set('agentic-result', {
|
|
484
|
+
name: finalName,
|
|
485
|
+
data: fixedWorkflow,
|
|
486
|
+
type: 'root',
|
|
487
|
+
realId: undefined,
|
|
488
|
+
});
|
|
489
|
+
}
|
|
490
|
+
}
|
|
491
|
+
else {
|
|
492
|
+
const errors = result.validationErrors ?? [];
|
|
493
|
+
this.log(theme.fail(`Validation failed — ${errors.length} unresolved issue${errors.length === 1 ? '' : 's'}.`));
|
|
494
|
+
errors.forEach((err) => this.log(theme.muted(` ↳ ${err}`)));
|
|
432
495
|
}
|
|
433
496
|
}
|
|
434
497
|
}
|
|
435
498
|
catch (error) {
|
|
436
499
|
const errMsg = this.cleanErrorMsg(error.message);
|
|
437
|
-
this.log(theme.fail(`Validation
|
|
438
|
-
this.log(theme.
|
|
500
|
+
this.log(theme.fail(`Validation failed — unhandled error`));
|
|
501
|
+
this.log(theme.muted(` ↳ ${errMsg}`));
|
|
439
502
|
process.exitCode = 1;
|
|
440
503
|
if (flags['keep-on-fail'] && createdWorkflowId) {
|
|
441
504
|
this.log(theme.warn(`PRESERVATION ACTIVE: Workflow ${createdWorkflowId} persists.`));
|
|
@@ -498,22 +561,6 @@ export default class Test extends Command {
|
|
|
498
561
|
catch { /* intentionally empty */ }
|
|
499
562
|
return errMsg;
|
|
500
563
|
}
|
|
501
|
-
/**
|
|
502
|
-
* Normalize error messages to catch "similar" errors (masking IDs/numbers)
|
|
503
|
-
*/
|
|
504
|
-
normalizeError(msg) {
|
|
505
|
-
const normalized = msg.toLowerCase();
|
|
506
|
-
// Group all unrecognized node type errors
|
|
507
|
-
if (normalized.includes('unrecognized node type')) {
|
|
508
|
-
return 'unrecognized node type';
|
|
509
|
-
}
|
|
510
|
-
return normalized
|
|
511
|
-
.replace(/\b[a-f0-9-]{36}\b/g, 'ID')
|
|
512
|
-
.replace(/\b[a-f0-9]{24}\b/g, 'ID')
|
|
513
|
-
.replace(/\b\d+\b/g, 'N')
|
|
514
|
-
.replace(/\s+/g, ' ')
|
|
515
|
-
.trim();
|
|
516
|
-
}
|
|
517
564
|
sanitizeWorkflow(data) {
|
|
518
565
|
// n8n API is extremely picky during UPDATE/CREATE.
|
|
519
566
|
// properties like 'meta', 'pinData', 'tags', and 'versionId' often cause 400 Bad Request
|
|
@@ -543,7 +590,7 @@ export default class Test extends Command {
|
|
|
543
590
|
}
|
|
544
591
|
return { ...workflowData, nodes, connections };
|
|
545
592
|
}
|
|
546
|
-
async saveWorkflows(deployedDefinitions,
|
|
593
|
+
async saveWorkflows(deployedDefinitions, _originalPath) {
|
|
547
594
|
if (deployedDefinitions.size === 0)
|
|
548
595
|
return;
|
|
549
596
|
const { save } = await inquirer.prompt([{
|
|
@@ -608,6 +655,859 @@ export default class Test extends Command {
|
|
|
608
655
|
await this.saveWorkflows(deployedDefinitions, originalPath);
|
|
609
656
|
}
|
|
610
657
|
}
|
|
658
|
+
/**
|
|
659
|
+
* Test a workflow that already exists on the n8n instance, using its real credentials
|
|
660
|
+
* and configured triggers — no ephemeral copy, no credential stripping, no shim injection.
|
|
661
|
+
*/
|
|
662
|
+
async testRemoteWorkflowDirectly(workflowId, workflowData, workflowName, client, aiService, n8nUrl, testScenarios) {
|
|
663
|
+
const nodes = (workflowData.nodes || []).filter(Boolean);
|
|
664
|
+
const validationErrors = [];
|
|
665
|
+
let lastFullExec = null;
|
|
666
|
+
let finalWorkflow = workflowData;
|
|
667
|
+
const webhookNode = nodes.find((n) => n.type === 'n8n-nodes-base.webhook' && !n.disabled);
|
|
668
|
+
if (webhookNode) {
|
|
669
|
+
const webhookPath = webhookNode.parameters?.path;
|
|
670
|
+
if (!webhookPath) {
|
|
671
|
+
return { passed: false, errors: ['Webhook node has no path configured.'] };
|
|
672
|
+
}
|
|
673
|
+
const currentWorkflow = await client.getWorkflow(workflowId);
|
|
674
|
+
finalWorkflow = currentWorkflow;
|
|
675
|
+
const wasActive = currentWorkflow.active === true;
|
|
676
|
+
// Strip any [n8m:shim] nodes left in the workflow from a previous test run.
|
|
677
|
+
// Re-wires connections back through by replacing shim references with the
|
|
678
|
+
// shim's own target (shim → B becomes the restored A → B).
|
|
679
|
+
{
|
|
680
|
+
const leftoverShims = currentWorkflow.nodes.filter((n) => n.name?.startsWith('[n8m:shim]'));
|
|
681
|
+
if (leftoverShims.length > 0) {
|
|
682
|
+
for (const shim of leftoverShims) {
|
|
683
|
+
const shimTarget = ((currentWorkflow.connections[shim.name]?.main ?? [])[0]?.[0])?.node;
|
|
684
|
+
if (shimTarget) {
|
|
685
|
+
for (const targets of Object.values(currentWorkflow.connections)) {
|
|
686
|
+
for (const segment of (targets?.main ?? [])) {
|
|
687
|
+
if (!Array.isArray(segment))
|
|
688
|
+
continue;
|
|
689
|
+
for (const conn of segment) {
|
|
690
|
+
if (conn?.node === shim.name)
|
|
691
|
+
conn.node = shimTarget;
|
|
692
|
+
}
|
|
693
|
+
}
|
|
694
|
+
}
|
|
695
|
+
}
|
|
696
|
+
delete currentWorkflow.connections[shim.name];
|
|
697
|
+
}
|
|
698
|
+
currentWorkflow.nodes = currentWorkflow.nodes.filter((n) => !n.name?.startsWith('[n8m:shim]'));
|
|
699
|
+
try {
|
|
700
|
+
await client.updateWorkflow(workflowId, {
|
|
701
|
+
name: currentWorkflow.name,
|
|
702
|
+
nodes: currentWorkflow.nodes,
|
|
703
|
+
connections: currentWorkflow.connections,
|
|
704
|
+
settings: currentWorkflow.settings || {},
|
|
705
|
+
});
|
|
706
|
+
}
|
|
707
|
+
catch { /* cleanup best-effort */ }
|
|
708
|
+
}
|
|
709
|
+
}
|
|
710
|
+
// Auto-fix control characters in node parameters before testing.
|
|
711
|
+
// Control chars (e.g. literal newlines in a Slack blocksUi field) are workflow
|
|
712
|
+
// configuration bugs — they cause "could not be parsed" errors at execution time
|
|
713
|
+
// regardless of what the test payload contains. Stripping them is always safe.
|
|
714
|
+
const { changed: nodeParamFixed, data: fixedWorkflowNodes } = this.sanitizeWorkflowNodeParams(currentWorkflow);
|
|
715
|
+
if (nodeParamFixed) {
|
|
716
|
+
try {
|
|
717
|
+
await client.updateWorkflow(workflowId, {
|
|
718
|
+
name: currentWorkflow.name,
|
|
719
|
+
nodes: fixedWorkflowNodes,
|
|
720
|
+
connections: currentWorkflow.connections,
|
|
721
|
+
settings: currentWorkflow.settings || {},
|
|
722
|
+
});
|
|
723
|
+
currentWorkflow.nodes = fixedWorkflowNodes; // keep in-memory consistent
|
|
724
|
+
this.log(theme.muted('Fixed control character encoding in node parameters.'));
|
|
725
|
+
}
|
|
726
|
+
catch { /* update failed — test may still encounter encoding errors */ }
|
|
727
|
+
}
|
|
728
|
+
// Proactively detect and repair Execute Command nodes whose shell scripts
|
|
729
|
+
// had their newlines stripped by an older version of this tool. The
|
|
730
|
+
// telltale sign is: no \n in the command but at least one "\ " (backslash
|
|
731
|
+
// followed by whitespace) — the remnant of a multiline line-continuation.
|
|
732
|
+
{
|
|
733
|
+
const collapsedNodes = currentWorkflow.nodes.filter((n) => n.type === 'n8n-nodes-base.executeCommand' &&
|
|
734
|
+
typeof n.parameters?.command === 'string' &&
|
|
735
|
+
!n.parameters.command.includes('\n') &&
|
|
736
|
+
/\\\s/.test(n.parameters.command));
|
|
737
|
+
if (collapsedNodes.length > 0) {
|
|
738
|
+
let anyRepaired = false;
|
|
739
|
+
for (const node of collapsedNodes) {
|
|
740
|
+
try {
|
|
741
|
+
this.log(theme.agent(`Repairing collapsed shell script in "${node.name}"...`));
|
|
742
|
+
node.parameters.command = await aiService.fixExecuteCommandScript(node.parameters.command);
|
|
743
|
+
anyRepaired = true;
|
|
744
|
+
this.log(theme.muted(`"${node.name}" script restored.`));
|
|
745
|
+
}
|
|
746
|
+
catch { /* repair failed — test continues without fix */ }
|
|
747
|
+
}
|
|
748
|
+
if (anyRepaired) {
|
|
749
|
+
try {
|
|
750
|
+
await client.updateWorkflow(workflowId, {
|
|
751
|
+
name: currentWorkflow.name,
|
|
752
|
+
nodes: currentWorkflow.nodes,
|
|
753
|
+
connections: currentWorkflow.connections,
|
|
754
|
+
settings: currentWorkflow.settings || {},
|
|
755
|
+
});
|
|
756
|
+
}
|
|
757
|
+
catch { /* update failed — repaired version stays in-memory only */ }
|
|
758
|
+
}
|
|
759
|
+
}
|
|
760
|
+
}
|
|
761
|
+
// Detect binary-source nodes and inject a test PNG as pin data so
|
|
762
|
+
// upload steps receive real binary content instead of an empty buffer.
|
|
763
|
+
const binarySourceNodes = this.findBinarySourceNodes(workflowData);
|
|
764
|
+
const existingPinData = currentWorkflow.pinData || {};
|
|
765
|
+
let testPinDataInjected = false;
|
|
766
|
+
if (binarySourceNodes.length > 0) {
|
|
767
|
+
// Try to fetch a real test image from a placeholder service; fall back
|
|
768
|
+
// to a bundled 1×1 PNG if the remote service is unreachable.
|
|
769
|
+
// Never fetch external image services during testing — use bundled 1×1 PNG.
|
|
770
|
+
const testImageBase64 = 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==';
|
|
771
|
+
const testFileSize = '68';
|
|
772
|
+
const testPinData = { ...existingPinData };
|
|
773
|
+
for (const nodeName of binarySourceNodes) {
|
|
774
|
+
testPinData[nodeName] = [{
|
|
775
|
+
json: { fileName: 'n8m-test.png', mimeType: 'image/png' },
|
|
776
|
+
binary: {
|
|
777
|
+
data: {
|
|
778
|
+
data: testImageBase64,
|
|
779
|
+
mimeType: 'image/png',
|
|
780
|
+
fileName: 'n8m-test.png',
|
|
781
|
+
fileSize: testFileSize,
|
|
782
|
+
fileExtension: 'png',
|
|
783
|
+
}
|
|
784
|
+
}
|
|
785
|
+
}];
|
|
786
|
+
}
|
|
787
|
+
try {
|
|
788
|
+
await client.setPinData(workflowId, currentWorkflow, testPinData);
|
|
789
|
+
testPinDataInjected = true;
|
|
790
|
+
this.log(theme.muted(`Test binary pinned to: ${binarySourceNodes.join(', ')}`));
|
|
791
|
+
}
|
|
792
|
+
catch {
|
|
793
|
+
// setPinData is unsupported on some n8n versions (REST API schema rejects
|
|
794
|
+
// pinData as an additional property). This is a graceful degradation —
|
|
795
|
+
// the test continues without binary injection.
|
|
796
|
+
this.log(theme.muted(`Binary injection skipped (not supported by this n8n version)`));
|
|
797
|
+
}
|
|
798
|
+
}
|
|
799
|
+
if (!wasActive) {
|
|
800
|
+
try {
|
|
801
|
+
await client.activateWorkflow(workflowId);
|
|
802
|
+
}
|
|
803
|
+
catch (err) {
|
|
804
|
+
return { passed: false, errors: [`Activation failed: ${err.message}`] };
|
|
805
|
+
}
|
|
806
|
+
}
|
|
807
|
+
// Track pre-shim state so the finally block can restore the workflow.
|
|
808
|
+
let preShimNodes = null;
|
|
809
|
+
let preShimConnections = null;
|
|
810
|
+
// Shim all external-network nodes so the test never calls real external services.
|
|
811
|
+
// Saves original state upfront — the binary-shim logic inside the try block
|
|
812
|
+
// checks `preShimNodes === null` to avoid double-saving, so this must come first.
|
|
813
|
+
{
|
|
814
|
+
const shimmed = N8nClient.shimNetworkNodes(currentWorkflow.nodes);
|
|
815
|
+
if (shimmed.some((n, i) => n !== currentWorkflow.nodes[i])) {
|
|
816
|
+
preShimNodes = JSON.parse(JSON.stringify(currentWorkflow.nodes));
|
|
817
|
+
preShimConnections = JSON.parse(JSON.stringify(currentWorkflow.connections ?? {}));
|
|
818
|
+
currentWorkflow.nodes = shimmed;
|
|
819
|
+
this.log(theme.muted('External service nodes shimmed for test isolation.'));
|
|
820
|
+
try {
|
|
821
|
+
await client.updateWorkflow(workflowId, {
|
|
822
|
+
name: currentWorkflow.name,
|
|
823
|
+
nodes: currentWorkflow.nodes,
|
|
824
|
+
connections: currentWorkflow.connections,
|
|
825
|
+
settings: currentWorkflow.settings || {},
|
|
826
|
+
});
|
|
827
|
+
}
|
|
828
|
+
catch { /* shimming update failed — proceed without network isolation */ }
|
|
829
|
+
}
|
|
830
|
+
}
|
|
831
|
+
try {
|
|
832
|
+
const nodeNames = nodes.map((n) => n.name).join(', ');
|
|
833
|
+
// Scan expressions so we know exactly which body fields this workflow needs.
|
|
834
|
+
const requiredFields = this.extractRequiredBodyFields(workflowData);
|
|
835
|
+
const fieldsHint = requiredFields.length > 0
|
|
836
|
+
? `\nThe workflow's expressions reference these $json.body fields: ${requiredFields.join(', ')}\nYour payload MUST include ALL of these as top-level keys.`
|
|
837
|
+
: '';
|
|
838
|
+
// Detect body fields that feed image URLs into HTTP Request binary nodes.
|
|
839
|
+
// When found, the prompt instructs the AI to use a real hosted image URL so
|
|
840
|
+
// n8n fetches actual binary data and the upload step receives real bytes.
|
|
841
|
+
const binaryUrlFields = this.findBinaryUrlFields(workflowData);
|
|
842
|
+
const binaryUrlHint = binaryUrlFields.length > 0
|
|
843
|
+
? `\nField(s) [${binaryUrlFields.join(', ')}] are used as image/file URLs by HTTP Request nodes whose output is uploaded to Slack or another service. For these fields you MUST supply a real, publicly accessible image URL — use https://placehold.co/100x100.png as the value.`
|
|
844
|
+
: '';
|
|
845
|
+
// Node type hints describe internal node parameters — only include them
|
|
846
|
+
// during self-healing where the specific error gives context. Injecting
|
|
847
|
+
// them into the INITIAL prompt confuses the AI into generating Slack/HTTP
|
|
848
|
+
// node params as webhook body fields instead of the actual body fields.
|
|
849
|
+
const nodeTypeHints = this.extractNodeTypeHints(workflowData);
|
|
850
|
+
// Check for a user-defined fixture payload for this workflow.
|
|
851
|
+
const fixture = this.loadWorkflowFixture(workflowId, workflowName);
|
|
852
|
+
let scenarios = testScenarios;
|
|
853
|
+
if (!scenarios || scenarios.length === 0) {
|
|
854
|
+
let mockPayload;
|
|
855
|
+
if (fixture) {
|
|
856
|
+
mockPayload = fixture;
|
|
857
|
+
}
|
|
858
|
+
else {
|
|
859
|
+
// n8n webhook wrapping: POST {"field": "value"} → node sees $json.body.field.
|
|
860
|
+
// Never nest payload under "body" — n8n does that automatically.
|
|
861
|
+
const context = `You are generating a test payload to POST to an n8n Webhook node.
|
|
862
|
+
n8n wraps the POST body automatically: POST {"content":"x"} → $json.body.content = "x".
|
|
863
|
+
NEVER nest under "body". Output a SINGLE flat JSON object.${fieldsHint}${binaryUrlHint}
|
|
864
|
+
Workflow: "${workflowName}", Nodes: ${nodeNames}`;
|
|
865
|
+
mockPayload = this.sanitizeMockPayload(await aiService.generateMockData(context));
|
|
866
|
+
}
|
|
867
|
+
scenarios = [{ name: 'Default Test', payload: mockPayload }];
|
|
868
|
+
}
|
|
869
|
+
const baseUrl = new URL(n8nUrl).origin;
|
|
870
|
+
const webhookUrl = `${baseUrl}/webhook/${webhookPath}`;
|
|
871
|
+
for (const scenario of scenarios) {
|
|
872
|
+
this.log(theme.agent(`Testing: ${scenario.name}`));
|
|
873
|
+
let currentPayload = scenario.payload;
|
|
874
|
+
let scenarioPassed = false;
|
|
875
|
+
let lastError = null;
|
|
876
|
+
let fixAttempted = false;
|
|
877
|
+
let binaryShimInjected = false;
|
|
878
|
+
let codeNodeFixApplied = false; // tracks whether a code_node_js fix was actually committed
|
|
879
|
+
let codeNodeFixAppliedName;
|
|
880
|
+
let mockDataShimApplied = false; // tracks whether a mock-data shim replaced the Code node
|
|
881
|
+
// Healing loop: up to 5 rounds (initial + regen + fix + mock-shim + downstream).
|
|
882
|
+
// Each round the AI evaluates the error and decides the remediation action.
|
|
883
|
+
for (let healRound = 0; healRound < 5; healRound++) {
|
|
884
|
+
// Record time BEFORE the POST so executions from earlier attempts
|
|
885
|
+
// (which may have finished late) are excluded from this attempt's poll.
|
|
886
|
+
const executionStartTime = Date.now();
|
|
887
|
+
const response = await fetch(webhookUrl, {
|
|
888
|
+
method: 'POST',
|
|
889
|
+
headers: { 'Content-Type': 'application/json' },
|
|
890
|
+
body: JSON.stringify(currentPayload),
|
|
891
|
+
});
|
|
892
|
+
if (!response.ok) {
|
|
893
|
+
lastError = `HTTP ${response.status} from webhook`;
|
|
894
|
+
break;
|
|
895
|
+
}
|
|
896
|
+
let executionFound = false;
|
|
897
|
+
// Poll up to 3 min — Slack/LLM workflows can take 60–90 s end-to-end.
|
|
898
|
+
Spinner.start('Waiting for execution result');
|
|
899
|
+
let trackedExecId;
|
|
900
|
+
for (let i = 0; i < 60; i++) {
|
|
901
|
+
await new Promise(r => setTimeout(r, 3000));
|
|
902
|
+
let fullExec;
|
|
903
|
+
if (trackedExecId) {
|
|
904
|
+
fullExec = await client.getExecution(trackedExecId);
|
|
905
|
+
}
|
|
906
|
+
else {
|
|
907
|
+
const executions = await client.getWorkflowExecutions(workflowId);
|
|
908
|
+
const recentExec = executions.find((e) => e.startedAt && new Date(e.startedAt).getTime() >= executionStartTime);
|
|
909
|
+
if (!recentExec)
|
|
910
|
+
continue;
|
|
911
|
+
trackedExecId = recentExec.id;
|
|
912
|
+
fullExec = await client.getExecution(trackedExecId);
|
|
913
|
+
}
|
|
914
|
+
lastFullExec = fullExec;
|
|
915
|
+
if (fullExec.status === 'running' || fullExec.status === 'waiting')
|
|
916
|
+
continue;
|
|
917
|
+
executionFound = true;
|
|
918
|
+
Spinner.stop();
|
|
919
|
+
if (fullExec.status === 'success') {
|
|
920
|
+
this.log(theme.done('Passed'));
|
|
921
|
+
scenarioPassed = true;
|
|
922
|
+
lastError = null;
|
|
923
|
+
}
|
|
924
|
+
else {
|
|
925
|
+
const execError = fullExec.data?.resultData?.error;
|
|
926
|
+
const nodeRef = execError?.node;
|
|
927
|
+
let failingNode = typeof nodeRef === 'string' ? nodeRef : nodeRef?.name ?? nodeRef?.type;
|
|
928
|
+
let rawMsg = execError?.message;
|
|
929
|
+
const topDesc = execError?.description ?? execError?.cause?.message;
|
|
930
|
+
if (rawMsg && topDesc && !rawMsg.includes(topDesc))
|
|
931
|
+
rawMsg = `${rawMsg} — ${topDesc}`;
|
|
932
|
+
if (!rawMsg) {
|
|
933
|
+
const runData = fullExec.data?.resultData?.runData;
|
|
934
|
+
if (runData) {
|
|
935
|
+
outer: for (const [nodeName, nodeRuns] of Object.entries(runData)) {
|
|
936
|
+
for (const run of nodeRuns) {
|
|
937
|
+
if (run?.error?.message) {
|
|
938
|
+
failingNode = failingNode ?? nodeName;
|
|
939
|
+
rawMsg = run.error.message;
|
|
940
|
+
const desc = run.error.description ?? run.error.cause?.message;
|
|
941
|
+
if (desc && !rawMsg.includes(desc))
|
|
942
|
+
rawMsg = `${rawMsg} — ${desc}`;
|
|
943
|
+
break outer;
|
|
944
|
+
}
|
|
945
|
+
}
|
|
946
|
+
}
|
|
947
|
+
}
|
|
948
|
+
}
|
|
949
|
+
rawMsg = rawMsg || 'Unknown flow failure';
|
|
950
|
+
lastError = failingNode ? `[${failingNode}] ${rawMsg}` : rawMsg;
|
|
951
|
+
this.log(theme.fail(`Failed: ${lastError}`));
|
|
952
|
+
}
|
|
953
|
+
break;
|
|
954
|
+
}
|
|
955
|
+
if (!executionFound) {
|
|
956
|
+
Spinner.stop();
|
|
957
|
+
lastError = 'Execution timed out (still running after 3 min). Check n8n for result.';
|
|
958
|
+
this.log(theme.warn(lastError));
|
|
959
|
+
}
|
|
960
|
+
if (scenarioPassed || !lastError)
|
|
961
|
+
break;
|
|
962
|
+
// Let the AI decide what to do with the error
|
|
963
|
+
const errSnapshot = lastError;
|
|
964
|
+
const nodeNameMatch = errSnapshot.match(/^\[([^\]]+)\]/);
|
|
965
|
+
const failNodeName = nodeNameMatch?.[1];
|
|
966
|
+
const failingNodeForEval = failNodeName
|
|
967
|
+
? currentWorkflow.nodes.find((n) => n.name === failNodeName)
|
|
968
|
+
: null;
|
|
969
|
+
const failingNodeCode = (failingNodeForEval?.type === 'n8n-nodes-base.code' && failingNodeForEval?.parameters?.jsCode)
|
|
970
|
+
? failingNodeForEval.parameters.jsCode
|
|
971
|
+
: undefined;
|
|
972
|
+
const evaluation = await aiService.evaluateTestError(errSnapshot, currentWorkflow.nodes, failNodeName, failingNodeCode);
|
|
973
|
+
if (evaluation.action === 'structural_pass') {
|
|
974
|
+
this.log(theme.warn(`${evaluation.reason}: ${errSnapshot}`));
|
|
975
|
+
this.log(theme.done('Structural validation passed.'));
|
|
976
|
+
scenarioPassed = true;
|
|
977
|
+
break;
|
|
978
|
+
}
|
|
979
|
+
if (evaluation.action === 'regenerate_payload') {
|
|
980
|
+
this.log(theme.agent(`Self-healing: regenerating test payload...`));
|
|
981
|
+
const context = `You are generating a test payload to POST to an n8n Webhook node.
|
|
982
|
+
n8n wraps the POST body automatically: POST {"X":"v"} → $json.body.X = "v".
|
|
983
|
+
NEVER nest under "body". Output a SINGLE flat JSON object.${requiredFields.length > 0 ? `\nRequired top-level keys: ${requiredFields.join(', ')}` : ''}${binaryUrlHint}${nodeTypeHints}
|
|
984
|
+
Workflow: "${workflowName}", Nodes: ${nodeNames}
|
|
985
|
+
Previous error: "${errSnapshot}"`;
|
|
986
|
+
currentPayload = this.sanitizeMockPayload(await aiService.generateMockData(context));
|
|
987
|
+
lastError = null;
|
|
988
|
+
continue; // retry with regenerated payload
|
|
989
|
+
}
|
|
990
|
+
if (evaluation.action === 'fix_node' && !fixAttempted) {
|
|
991
|
+
const targetName = evaluation.targetNodeName ?? failNodeName;
|
|
992
|
+
let fixed = false;
|
|
993
|
+
if (evaluation.nodeFixType === 'code_node_js') {
|
|
994
|
+
const targetNode = currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.code' && (!targetName || n.name === targetName)) ?? currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.code');
|
|
995
|
+
if (targetNode?.parameters?.jsCode) {
|
|
996
|
+
try {
|
|
997
|
+
this.log(theme.agent(`Auto-fixing Code node "${targetNode.name}"...`));
|
|
998
|
+
targetNode.parameters.jsCode = await aiService.fixCodeNodeJavaScript(targetNode.parameters.jsCode, errSnapshot);
|
|
999
|
+
await client.updateWorkflow(workflowId, {
|
|
1000
|
+
name: currentWorkflow.name,
|
|
1001
|
+
nodes: currentWorkflow.nodes,
|
|
1002
|
+
connections: currentWorkflow.connections,
|
|
1003
|
+
settings: currentWorkflow.settings || {},
|
|
1004
|
+
});
|
|
1005
|
+
fixed = true;
|
|
1006
|
+
fixAttempted = true;
|
|
1007
|
+
codeNodeFixApplied = true;
|
|
1008
|
+
codeNodeFixAppliedName = targetNode.name;
|
|
1009
|
+
lastError = null;
|
|
1010
|
+
this.log(theme.muted('Code node updated. Retesting...'));
|
|
1011
|
+
}
|
|
1012
|
+
catch { /* fix failed — fall through */ }
|
|
1013
|
+
}
|
|
1014
|
+
}
|
|
1015
|
+
else if (evaluation.nodeFixType === 'execute_command') {
|
|
1016
|
+
const targetNode = currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.executeCommand' && (!targetName || n.name === targetName)) ?? currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.executeCommand');
|
|
1017
|
+
if (targetNode?.parameters?.command) {
|
|
1018
|
+
try {
|
|
1019
|
+
this.log(theme.agent(`Auto-fixing Execute Command script in "${targetNode.name}"...`));
|
|
1020
|
+
targetNode.parameters.command = await aiService.fixExecuteCommandScript(targetNode.parameters.command, errSnapshot);
|
|
1021
|
+
await client.updateWorkflow(workflowId, {
|
|
1022
|
+
name: currentWorkflow.name,
|
|
1023
|
+
nodes: currentWorkflow.nodes,
|
|
1024
|
+
connections: currentWorkflow.connections,
|
|
1025
|
+
settings: currentWorkflow.settings || {},
|
|
1026
|
+
});
|
|
1027
|
+
fixed = true;
|
|
1028
|
+
fixAttempted = true;
|
|
1029
|
+
lastError = null;
|
|
1030
|
+
this.log(theme.muted('Execute Command script updated. Retesting...'));
|
|
1031
|
+
}
|
|
1032
|
+
catch { /* fix failed — fall through */ }
|
|
1033
|
+
}
|
|
1034
|
+
}
|
|
1035
|
+
else if (evaluation.nodeFixType === 'binary_field') {
|
|
1036
|
+
const fieldMatch = errSnapshot.match(/has no binary field ['"]?(\w+)['"]?/i);
|
|
1037
|
+
const expectedField = fieldMatch?.[1];
|
|
1038
|
+
const failingNode = targetName
|
|
1039
|
+
? currentWorkflow.nodes.find((n) => n.name === targetName)
|
|
1040
|
+
: null;
|
|
1041
|
+
// Delegate binary-field tracing to the AI — it traces the full graph
|
|
1042
|
+
// (handling passthrough nodes like Merge, Set, IF) to find the actual
|
|
1043
|
+
// binary-producing node and the field name it outputs.
|
|
1044
|
+
this.log(theme.agent(`Tracing binary data flow to infer correct field name for "${targetName ?? failNodeName}"...`));
|
|
1045
|
+
const wfConnections = currentWorkflow.connections || workflowData.connections || {};
|
|
1046
|
+
const correctField = await aiService.inferBinaryFieldNameFromWorkflow(targetName ?? failNodeName ?? 'unknown', currentWorkflow.nodes, wfConnections);
|
|
1047
|
+
if (failingNode && expectedField && correctField && correctField !== expectedField) {
|
|
1048
|
+
// Scan every string parameter — the key name varies by node type
|
|
1049
|
+
const paramKey = Object.entries(failingNode.parameters || {})
|
|
1050
|
+
.find(([, v]) => typeof v === 'string' && v === expectedField)?.[0];
|
|
1051
|
+
if (paramKey) {
|
|
1052
|
+
try {
|
|
1053
|
+
this.log(theme.agent(`Fixing binary field in "${targetName}": '${expectedField}' → '${correctField}' (${paramKey})...`));
|
|
1054
|
+
failingNode.parameters[paramKey] = correctField;
|
|
1055
|
+
await client.updateWorkflow(workflowId, {
|
|
1056
|
+
name: currentWorkflow.name,
|
|
1057
|
+
nodes: currentWorkflow.nodes,
|
|
1058
|
+
connections: currentWorkflow.connections,
|
|
1059
|
+
settings: currentWorkflow.settings || {},
|
|
1060
|
+
});
|
|
1061
|
+
fixed = true;
|
|
1062
|
+
fixAttempted = true;
|
|
1063
|
+
lastError = null;
|
|
1064
|
+
this.log(theme.muted('Binary field name updated. Retesting...'));
|
|
1065
|
+
}
|
|
1066
|
+
catch { /* ignore */ }
|
|
1067
|
+
}
|
|
1068
|
+
}
|
|
1069
|
+
if (!fixed) {
|
|
1070
|
+
// Inject a Code node shim that produces synthetic binary data so the
|
|
1071
|
+
// downstream node can actually execute instead of structural-passing.
|
|
1072
|
+
const shimField = correctField ?? expectedField ?? 'data';
|
|
1073
|
+
this.log(theme.agent(`Injecting binary test shim for field "${shimField}" before "${targetName ?? failNodeName}"...`));
|
|
1074
|
+
try {
|
|
1075
|
+
// Save original state before we mutate — restored in finally.
|
|
1076
|
+
if (preShimNodes === null) {
|
|
1077
|
+
preShimNodes = JSON.parse(JSON.stringify(currentWorkflow.nodes));
|
|
1078
|
+
preShimConnections = JSON.parse(JSON.stringify(currentWorkflow.connections ?? {}));
|
|
1079
|
+
}
|
|
1080
|
+
const shimCode = aiService.generateBinaryShimCode(shimField);
|
|
1081
|
+
const shimName = `[n8m:shim] Binary for ${targetName ?? failNodeName}`;
|
|
1082
|
+
const shimPos = failingNode?.position ?? [500, 300];
|
|
1083
|
+
const shimNode = {
|
|
1084
|
+
id: `shim-binary-${Date.now()}`,
|
|
1085
|
+
name: shimName,
|
|
1086
|
+
type: 'n8n-nodes-base.code',
|
|
1087
|
+
typeVersion: 2,
|
|
1088
|
+
position: [shimPos[0] - 220, shimPos[1]],
|
|
1089
|
+
parameters: { mode: 'runOnceForAllItems', jsCode: shimCode },
|
|
1090
|
+
};
|
|
1091
|
+
// Rewire: redirect connections pointing at the failing node to the shim,
|
|
1092
|
+
// then add shim → failing node.
|
|
1093
|
+
const failName = targetName ?? failNodeName ?? '';
|
|
1094
|
+
const conns = JSON.parse(JSON.stringify(currentWorkflow.connections ?? {}));
|
|
1095
|
+
for (const targets of Object.values(conns)) {
|
|
1096
|
+
for (const segment of (targets?.main ?? [])) {
|
|
1097
|
+
if (!Array.isArray(segment))
|
|
1098
|
+
continue;
|
|
1099
|
+
for (const conn of segment) {
|
|
1100
|
+
if (conn?.node === failName)
|
|
1101
|
+
conn.node = shimName;
|
|
1102
|
+
}
|
|
1103
|
+
}
|
|
1104
|
+
}
|
|
1105
|
+
conns[shimName] = { main: [[{ node: failName, type: 'main', index: 0 }]] };
|
|
1106
|
+
currentWorkflow.nodes = [...currentWorkflow.nodes, shimNode];
|
|
1107
|
+
currentWorkflow.connections = conns;
|
|
1108
|
+
await client.updateWorkflow(workflowId, {
|
|
1109
|
+
name: currentWorkflow.name,
|
|
1110
|
+
nodes: currentWorkflow.nodes,
|
|
1111
|
+
connections: currentWorkflow.connections,
|
|
1112
|
+
settings: currentWorkflow.settings || {},
|
|
1113
|
+
});
|
|
1114
|
+
fixed = true;
|
|
1115
|
+
fixAttempted = true;
|
|
1116
|
+
binaryShimInjected = true;
|
|
1117
|
+
lastError = null;
|
|
1118
|
+
this.log(theme.muted('Binary shim injected. Retesting...'));
|
|
1119
|
+
}
|
|
1120
|
+
catch {
|
|
1121
|
+
// Shim generation/injection failed — fall through to structural pass
|
|
1122
|
+
this.log(theme.warn(`Binary data not available in test environment (upstream pipeline required): ${errSnapshot}`));
|
|
1123
|
+
this.log(theme.done('Structural validation passed.'));
|
|
1124
|
+
scenarioPassed = true;
|
|
1125
|
+
}
|
|
1126
|
+
}
|
|
1127
|
+
}
|
|
1128
|
+
if (fixed)
|
|
1129
|
+
continue; // retry with fix applied
|
|
1130
|
+
}
|
|
1131
|
+
// A Code node still fails after its JS was patched.
|
|
1132
|
+
// Try replacing it with hardcoded mock data so downstream nodes
|
|
1133
|
+
// (e.g. Slack at the end of the flow) can still be exercised.
|
|
1134
|
+
if (codeNodeFixApplied && !mockDataShimApplied) {
|
|
1135
|
+
const shimTarget = currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.code' && n.name === codeNodeFixAppliedName);
|
|
1136
|
+
if (shimTarget?.parameters?.jsCode) {
|
|
1137
|
+
this.log(theme.agent(`"${codeNodeFixAppliedName}" still fails — replacing with mock data to continue test...`));
|
|
1138
|
+
try {
|
|
1139
|
+
if (preShimNodes === null) {
|
|
1140
|
+
preShimNodes = JSON.parse(JSON.stringify(currentWorkflow.nodes));
|
|
1141
|
+
preShimConnections = JSON.parse(JSON.stringify(currentWorkflow.connections ?? {}));
|
|
1142
|
+
}
|
|
1143
|
+
shimTarget.parameters.jsCode = await aiService.shimCodeNodeWithMockData(shimTarget.parameters.jsCode);
|
|
1144
|
+
await client.updateWorkflow(workflowId, {
|
|
1145
|
+
name: currentWorkflow.name,
|
|
1146
|
+
nodes: currentWorkflow.nodes,
|
|
1147
|
+
connections: currentWorkflow.connections,
|
|
1148
|
+
settings: currentWorkflow.settings || {},
|
|
1149
|
+
});
|
|
1150
|
+
mockDataShimApplied = true;
|
|
1151
|
+
lastError = null;
|
|
1152
|
+
this.log(theme.muted(`"${codeNodeFixAppliedName}" replaced with mock data. Retesting...`));
|
|
1153
|
+
continue;
|
|
1154
|
+
}
|
|
1155
|
+
catch { /* fall through to structural pass */ }
|
|
1156
|
+
}
|
|
1157
|
+
}
|
|
1158
|
+
if (codeNodeFixApplied || mockDataShimApplied) {
|
|
1159
|
+
this.log(theme.warn(`Code node "${codeNodeFixAppliedName ?? failNodeName ?? 'unknown'}" relies on external APIs unavailable in test environment.`));
|
|
1160
|
+
this.log(theme.done('Structural validation passed.'));
|
|
1161
|
+
scenarioPassed = true;
|
|
1162
|
+
break;
|
|
1163
|
+
}
|
|
1164
|
+
// Binary-field errors that survive the fix block (e.g. second round after a
|
|
1165
|
+
// successful fix, or fixAttempted was already true) still indicate a
|
|
1166
|
+
// test-environment limitation — binary data requires the full upstream pipeline.
|
|
1167
|
+
if (!scenarioPassed && evaluation.nodeFixType === 'binary_field') {
|
|
1168
|
+
this.log(theme.warn(`Binary data not available in test environment (upstream pipeline required): ${errSnapshot}`));
|
|
1169
|
+
this.log(theme.done('Structural validation passed.'));
|
|
1170
|
+
scenarioPassed = true;
|
|
1171
|
+
break;
|
|
1172
|
+
}
|
|
1173
|
+
// If a binary shim was successfully injected and the downstream node now
|
|
1174
|
+
// fails with an external API / credentials error (Invalid URL, auth failure,
|
|
1175
|
+
// etc.), that is a test-environment limitation — the binary pipeline is valid.
|
|
1176
|
+
if (!scenarioPassed && binaryShimInjected) {
|
|
1177
|
+
this.log(theme.warn(`External service error after binary shim (credentials/API required): ${errSnapshot}`));
|
|
1178
|
+
this.log(theme.done('Structural validation passed.'));
|
|
1179
|
+
scenarioPassed = true;
|
|
1180
|
+
break;
|
|
1181
|
+
}
|
|
1182
|
+
// escalate, or fix failed / already attempted
|
|
1183
|
+
if (!scenarioPassed) {
|
|
1184
|
+
validationErrors.push(`Scenario "${scenario.name}" Failed: ${lastError}`);
|
|
1185
|
+
}
|
|
1186
|
+
break;
|
|
1187
|
+
} // end healRound
|
|
1188
|
+
}
|
|
1189
|
+
}
|
|
1190
|
+
finally {
|
|
1191
|
+
// Restore original pin data (remove our test binary injection)
|
|
1192
|
+
if (testPinDataInjected) {
|
|
1193
|
+
try {
|
|
1194
|
+
await client.setPinData(workflowId, currentWorkflow, existingPinData);
|
|
1195
|
+
}
|
|
1196
|
+
catch { /* intentionally empty */ }
|
|
1197
|
+
}
|
|
1198
|
+
// Remove injected shim nodes and restore original connections.
|
|
1199
|
+
if (preShimNodes !== null) {
|
|
1200
|
+
try {
|
|
1201
|
+
await client.updateWorkflow(workflowId, {
|
|
1202
|
+
name: currentWorkflow.name,
|
|
1203
|
+
nodes: preShimNodes,
|
|
1204
|
+
connections: preShimConnections,
|
|
1205
|
+
settings: currentWorkflow.settings || {},
|
|
1206
|
+
});
|
|
1207
|
+
}
|
|
1208
|
+
catch { /* restore best-effort */ }
|
|
1209
|
+
}
|
|
1210
|
+
if (!wasActive) {
|
|
1211
|
+
try {
|
|
1212
|
+
await client.deactivateWorkflow(workflowId);
|
|
1213
|
+
}
|
|
1214
|
+
catch { /* intentionally empty */ }
|
|
1215
|
+
}
|
|
1216
|
+
}
|
|
1217
|
+
}
|
|
1218
|
+
else {
|
|
1219
|
+
// No webhook trigger — validate structure by checking (or briefly testing) activation.
|
|
1220
|
+
const currentWorkflow = await client.getWorkflow(workflowId);
|
|
1221
|
+
finalWorkflow = currentWorkflow;
|
|
1222
|
+
if (currentWorkflow.active) {
|
|
1223
|
+
this.log(theme.done('Workflow is active — structural validation passed.'));
|
|
1224
|
+
}
|
|
1225
|
+
else {
|
|
1226
|
+
try {
|
|
1227
|
+
await client.activateWorkflow(workflowId);
|
|
1228
|
+
await client.deactivateWorkflow(workflowId);
|
|
1229
|
+
this.log(theme.done('Structural validation passed (activation test succeeded).'));
|
|
1230
|
+
}
|
|
1231
|
+
catch (err) {
|
|
1232
|
+
validationErrors.push(`Structural validation failed: ${err.message}`);
|
|
1233
|
+
this.log(theme.fail(`Structural validation failed: ${err.message}`));
|
|
1234
|
+
}
|
|
1235
|
+
}
|
|
1236
|
+
}
|
|
1237
|
+
return {
|
|
1238
|
+
passed: validationErrors.length === 0,
|
|
1239
|
+
errors: validationErrors,
|
|
1240
|
+
finalWorkflow,
|
|
1241
|
+
lastExecution: lastFullExec,
|
|
1242
|
+
};
|
|
1243
|
+
}
|
|
1244
|
+
/**
|
|
1245
|
+
* Scan a workflow's expressions to find all field names accessed via $json.body.FIELD.
|
|
1246
|
+
* These become required keys in the test POST payload because n8n wraps the body
|
|
1247
|
+
* automatically — a downstream expression $json.body.content needs {"content": ...} in the POST.
|
|
1248
|
+
*/
|
|
1249
|
+
extractRequiredBodyFields(workflowData) {
|
|
1250
|
+
const fields = new Set();
|
|
1251
|
+
const json = JSON.stringify(workflowData);
|
|
1252
|
+
// Matches common n8n expression forms that access POST body fields:
|
|
1253
|
+
// $json.body.field (dot notation)
|
|
1254
|
+
// .json.body.field (node-reference variant: $('X').item.json.body.field)
|
|
1255
|
+
// $json["body"]["field"] (bracket notation)
|
|
1256
|
+
// $json['body']['field'] (bracket notation, single-quoted)
|
|
1257
|
+
const patterns = [
|
|
1258
|
+
/\$json\.body\.([a-zA-Z_]\w*)/g,
|
|
1259
|
+
/\.json\.body\.([a-zA-Z_]\w*)/g,
|
|
1260
|
+
/\$json\[["']body["']\]\[["']([a-zA-Z_]\w*)["']\]/g,
|
|
1261
|
+
/\.json\[["']body["']\]\[["']([a-zA-Z_]\w*)["']\]/g,
|
|
1262
|
+
];
|
|
1263
|
+
for (const pattern of patterns) {
|
|
1264
|
+
let match;
|
|
1265
|
+
while ((match = pattern.exec(json)) !== null) {
|
|
1266
|
+
const field = match[1];
|
|
1267
|
+
// Exclude noise — headers/query/params are read-only webhook meta, not body fields
|
|
1268
|
+
if (field && !['headers', 'query', 'params', 'method', 'path'].includes(field)) {
|
|
1269
|
+
fields.add(field);
|
|
1270
|
+
}
|
|
1271
|
+
}
|
|
1272
|
+
}
|
|
1273
|
+
return Array.from(fields);
|
|
1274
|
+
}
|
|
1275
|
+
/**
|
|
1276
|
+
* Load a pre-defined test payload fixture for a specific workflow.
|
|
1277
|
+
* Checks (in order): ./workflow-test-fixtures.json, ./workflows/test-fixtures.json,
|
|
1278
|
+
* and the bundled src/resources/workflow-test-fixtures.json.
|
|
1279
|
+
* Returns null if no matching fixture is found.
|
|
1280
|
+
*/
|
|
1281
|
+
loadWorkflowFixture(workflowId, workflowName) {
|
|
1282
|
+
const candidatePaths = [
|
|
1283
|
+
path.join(process.cwd(), 'workflow-test-fixtures.json'),
|
|
1284
|
+
path.join(process.cwd(), 'workflows', 'test-fixtures.json'),
|
|
1285
|
+
path.join(__dirname, '..', 'resources', 'workflow-test-fixtures.json'),
|
|
1286
|
+
path.join(__dirname, '..', '..', 'src', 'resources', 'workflow-test-fixtures.json'),
|
|
1287
|
+
];
|
|
1288
|
+
for (const p of candidatePaths) {
|
|
1289
|
+
if (!existsSync(p))
|
|
1290
|
+
continue;
|
|
1291
|
+
try {
|
|
1292
|
+
const fixtures = JSON.parse(readFileSync(p, 'utf8'));
|
|
1293
|
+
// Match by exact ID first, then by name (case-insensitive substring)
|
|
1294
|
+
const byId = fixtures[workflowId];
|
|
1295
|
+
if (byId?.payload)
|
|
1296
|
+
return byId.payload;
|
|
1297
|
+
const nameKey = Object.keys(fixtures).find(k => workflowName.toLowerCase().includes(k.toLowerCase()) ||
|
|
1298
|
+
k.toLowerCase().includes(workflowName.toLowerCase()));
|
|
1299
|
+
if (nameKey && fixtures[nameKey]?.payload)
|
|
1300
|
+
return fixtures[nameKey].payload;
|
|
1301
|
+
}
|
|
1302
|
+
catch { /* skip malformed file */ }
|
|
1303
|
+
}
|
|
1304
|
+
return null;
|
|
1305
|
+
}
|
|
1306
|
+
/**
|
|
1307
|
+
* Load node-test-hints.json and build a context string describing what
|
|
1308
|
+
* data format each node type in the workflow expects. Used to inform
|
|
1309
|
+
* generateMockData so the AI sends correctly-shaped values (e.g. Block Kit
|
|
1310
|
+
* JSON for a Slack blocksUi parameter instead of a plain string).
|
|
1311
|
+
*/
|
|
1312
|
+
extractNodeTypeHints(workflowData) {
|
|
1313
|
+
let hints = {};
|
|
1314
|
+
try {
|
|
1315
|
+
const candidates = [
|
|
1316
|
+
path.join(__dirname, '..', 'resources', 'node-test-hints.json'), // dist/commands → dist/resources
|
|
1317
|
+
path.join(__dirname, '..', '..', 'src', 'resources', 'node-test-hints.json') // dev (ts-node / tsx)
|
|
1318
|
+
];
|
|
1319
|
+
for (const p of candidates) {
|
|
1320
|
+
if (existsSync(p)) {
|
|
1321
|
+
hints = JSON.parse(readFileSync(p, 'utf8'));
|
|
1322
|
+
break;
|
|
1323
|
+
}
|
|
1324
|
+
}
|
|
1325
|
+
}
|
|
1326
|
+
catch { /* hints stay empty */ }
|
|
1327
|
+
if (Object.keys(hints).length === 0)
|
|
1328
|
+
return '';
|
|
1329
|
+
const nodeTypes = [...new Set((workflowData.nodes || []).map((n) => String(n.type)).filter(Boolean))];
|
|
1330
|
+
const lines = [];
|
|
1331
|
+
for (const type of nodeTypes) {
|
|
1332
|
+
const typeHints = hints[type];
|
|
1333
|
+
if (!typeHints)
|
|
1334
|
+
continue;
|
|
1335
|
+
for (const [param, hint] of Object.entries(typeHints)) {
|
|
1336
|
+
if (param.startsWith('_'))
|
|
1337
|
+
continue;
|
|
1338
|
+
lines.push(`Node "${type}" › param "${param}": type=${hint.type}. ${hint.description ?? ''}` +
|
|
1339
|
+
(hint.sample ? ` Sample value: ${hint.sample}` : ''));
|
|
1340
|
+
}
|
|
1341
|
+
}
|
|
1342
|
+
return lines.length > 0
|
|
1343
|
+
? `\nNode-specific parameter requirements:\n${lines.join('\n')}`
|
|
1344
|
+
: '';
|
|
1345
|
+
}
|
|
1346
|
+
/**
|
|
1347
|
+
* Find nodes that feed binary data into upload-type nodes.
|
|
1348
|
+
* Returns node names whose outputs should be pinned with a test binary
|
|
1349
|
+
* so downstream file-upload steps receive real content instead of empty buffers.
|
|
1350
|
+
*/
|
|
1351
|
+
findBinarySourceNodes(workflowData) {
|
|
1352
|
+
const nodes = workflowData.nodes || [];
|
|
1353
|
+
const connections = workflowData.connections || {};
|
|
1354
|
+
// Identify nodes that upload binary (by name or type)
|
|
1355
|
+
const uploadNodeNames = new Set(nodes
|
|
1356
|
+
.filter((n) => /upload.*binary|binary.*upload/i.test(n.name) || /upload/i.test(n.type))
|
|
1357
|
+
.map((n) => n.name));
|
|
1358
|
+
if (uploadNodeNames.size === 0)
|
|
1359
|
+
return [];
|
|
1360
|
+
// Walk the connection graph to find their direct predecessors
|
|
1361
|
+
const sources = new Set();
|
|
1362
|
+
for (const [srcNode, conns] of Object.entries(connections)) {
|
|
1363
|
+
const mainConns = conns.main || [];
|
|
1364
|
+
for (const group of mainConns) {
|
|
1365
|
+
for (const c of (group || [])) {
|
|
1366
|
+
if (uploadNodeNames.has(c.node))
|
|
1367
|
+
sources.add(srcNode);
|
|
1368
|
+
}
|
|
1369
|
+
}
|
|
1370
|
+
}
|
|
1371
|
+
return [...sources];
|
|
1372
|
+
}
|
|
1373
|
+
/**
|
|
1374
|
+
* Detect webhook body fields that are used as image/file URLs by HTTP Request
|
|
1375
|
+
* nodes that sit immediately upstream of binary-upload nodes.
|
|
1376
|
+
*
|
|
1377
|
+
* When n8n fetches a real image URL (supplied via the webhook payload) it gets
|
|
1378
|
+
* actual binary bytes, which then flow into the upload step. Returning these
|
|
1379
|
+
* field names lets the prompt instruct the AI to use a real hosted image URL
|
|
1380
|
+
* (e.g. placehold.co) instead of a placeholder string — so the upload step
|
|
1381
|
+
* receives real binary data without needing pinData API support.
|
|
1382
|
+
*/
|
|
1383
|
+
findBinaryUrlFields(workflowData) {
|
|
1384
|
+
const nodes = workflowData.nodes || [];
|
|
1385
|
+
const connections = workflowData.connections || {};
|
|
1386
|
+
// Identify upload nodes
|
|
1387
|
+
const uploadNodeNames = new Set(nodes
|
|
1388
|
+
.filter((n) => /upload.*binary|binary.*upload/i.test(n.name) || /upload/i.test(n.type))
|
|
1389
|
+
.map((n) => n.name));
|
|
1390
|
+
if (uploadNodeNames.size === 0)
|
|
1391
|
+
return [];
|
|
1392
|
+
// Find their direct predecessors
|
|
1393
|
+
const binarySourceNames = new Set();
|
|
1394
|
+
for (const [srcNode, conns] of Object.entries(connections)) {
|
|
1395
|
+
const mainConns = conns.main || [];
|
|
1396
|
+
for (const group of mainConns) {
|
|
1397
|
+
for (const c of (group || [])) {
|
|
1398
|
+
if (uploadNodeNames.has(c.node))
|
|
1399
|
+
binarySourceNames.add(srcNode);
|
|
1400
|
+
}
|
|
1401
|
+
}
|
|
1402
|
+
}
|
|
1403
|
+
// For each predecessor that is an HTTP Request node, extract body-field
|
|
1404
|
+
// references used in its URL parameter.
|
|
1405
|
+
const nodeMap = new Map(nodes.map((n) => [n.name, n]));
|
|
1406
|
+
const urlFields = new Set();
|
|
1407
|
+
const bodyFieldPattern = [
|
|
1408
|
+
/\$json\.body\.([a-zA-Z_]\w*)/g,
|
|
1409
|
+
/\.json\.body\.([a-zA-Z_]\w*)/g,
|
|
1410
|
+
/\$json\[["']body["']\]\[["']([a-zA-Z_]\w*)["']\]/g,
|
|
1411
|
+
];
|
|
1412
|
+
for (const nodeName of binarySourceNames) {
|
|
1413
|
+
const node = nodeMap.get(nodeName);
|
|
1414
|
+
if (!node || node.type !== 'n8n-nodes-base.httpRequest')
|
|
1415
|
+
continue;
|
|
1416
|
+
const urlParam = node.parameters?.url ?? '';
|
|
1417
|
+
const urlStr = typeof urlParam === 'string' ? urlParam : JSON.stringify(urlParam);
|
|
1418
|
+
for (const pattern of bodyFieldPattern) {
|
|
1419
|
+
let match;
|
|
1420
|
+
while ((match = pattern.exec(urlStr)) !== null) {
|
|
1421
|
+
if (match[1])
|
|
1422
|
+
urlFields.add(match[1]);
|
|
1423
|
+
}
|
|
1424
|
+
}
|
|
1425
|
+
}
|
|
1426
|
+
return [...urlFields];
|
|
1427
|
+
}
|
|
1428
|
+
/**
 * Deep-scan all node parameter values and strip control characters
 * (U+0000–U+001F, U+007F). Returns the sanitized nodes array and a flag
 * indicating whether any changes were made.
 *
 * Control chars in node params (e.g. a literal newline inside a Slack
 * blocksUi JSON string) are workflow configuration bugs — they cause n8n to
 * throw "could not be parsed" at execution time regardless of the test payload.
 *
 * Script-bearing node types (Code/Function/FunctionItem, Execute Command) are
 * exempted below because their newlines are syntactically meaningful.
 *
 * @param {object} workflowData - Workflow definition; only `.nodes` is read.
 * @returns {{changed: boolean, data: object[]}} `data` is a sanitized deep
 *   clone of the nodes; `changed` is true when any value was modified.
 */
sanitizeWorkflowNodeParams(workflowData) {
    // Shared flag, closed over by deepStrip so any nested replacement marks it.
    let changed = false;
    // Recursive sanitizer: returns new strings/arrays, but mutates plain
    // objects in place (safe — we only ever feed it the clone below).
    const deepStrip = (val) => {
        if (typeof val === 'string') {
            // eslint-disable-next-line no-control-regex
            const clean = val.replace(/[\x00-\x1F\x7F]/g, '');
            if (clean !== val)
                changed = true;
            return clean;
        }
        if (Array.isArray(val))
            return val.map(deepStrip);
        if (val && typeof val === 'object') {
            for (const k of Object.keys(val))
                val[k] = deepStrip(val[k]);
        }
        // Numbers, booleans, null, undefined pass through untouched.
        return val;
    };
    // Deep-clone so we don't mutate the original until we know the update succeeded
    const nodes = JSON.parse(JSON.stringify(workflowData.nodes || []));
    for (const node of nodes) {
        if (!node.parameters)
            continue;
        // Skip Code / Function nodes — their jsCode parameters are JavaScript source
        // that legitimately contains newlines (0x0A). Stripping them destroys the
        // script syntax (e.g. `const x\nconst y` → `const xconst y` is invalid JS).
        if (node.type === 'n8n-nodes-base.code' ||
            node.type === 'n8n-nodes-base.function' ||
            node.type === 'n8n-nodes-base.functionItem')
            continue;
        // Execute Command nodes: deepStrip would strip \n from shell scripts,
        // collapsing them to one line and making line-continuation backslashes
        // invalid (/bin/sh: : not found). Only replace U+00A0 → regular space —
        // AI-generated commands often use non-breaking spaces in indentation which
        // bash treats as an empty command name.
        if (node.type === 'n8n-nodes-base.executeCommand') {
            if (typeof node.parameters.command === 'string') {
                const fixed = node.parameters.command.replace(/\u00A0/g, ' ');
                if (fixed !== node.parameters.command) {
                    node.parameters.command = fixed;
                    changed = true;
                }
            }
            continue; // skip deepStrip — would destroy newlines
        }
        node.parameters = deepStrip(node.parameters);
    }
    return { changed, data: nodes };
}
|
|
1486
|
+
/**
|
|
1487
|
+
* Strip control characters (U+0000–U+001F, except tab/LF/CR) from all
|
|
1488
|
+
* string values in a generated mock payload. AI-generated Block Kit JSON
|
|
1489
|
+
* and other rich-text fields sometimes contain raw control chars that cause
|
|
1490
|
+
* n8n's parameter parser to throw "Bad control character in string literal".
|
|
1491
|
+
*/
|
|
1492
|
+
sanitizeMockPayload(data) {
|
|
1493
|
+
if (typeof data === 'string') {
|
|
1494
|
+
// Strip ALL control characters (U+0000–U+001F, U+007F) from test payload strings.
|
|
1495
|
+
// This includes LF/CR — necessary because AI-generated Block Kit JSON often embeds
|
|
1496
|
+
// literal newlines inside stringified JSON values, causing n8n's parser to throw
|
|
1497
|
+
// "Bad control character in string literal in JSON".
|
|
1498
|
+
// eslint-disable-next-line no-control-regex
|
|
1499
|
+
return data.replace(/[\x00-\x1F\x7F]/g, '');
|
|
1500
|
+
}
|
|
1501
|
+
if (Array.isArray(data))
|
|
1502
|
+
return data.map((v) => this.sanitizeMockPayload(v));
|
|
1503
|
+
if (data && typeof data === 'object') {
|
|
1504
|
+
const result = {};
|
|
1505
|
+
for (const [k, v] of Object.entries(data))
|
|
1506
|
+
result[k] = this.sanitizeMockPayload(v);
|
|
1507
|
+
return result;
|
|
1508
|
+
}
|
|
1509
|
+
return data;
|
|
1510
|
+
}
|
|
611
1511
|
async deployWorkflows(deployedDefinitions, client) {
|
|
612
1512
|
for (const [, def] of deployedDefinitions.entries()) {
|
|
613
1513
|
const cleanData = this.sanitizeWorkflow(this.stripShim(def.data));
|
|
@@ -648,4 +1548,193 @@ export default class Test extends Command {
|
|
|
648
1548
|
}
|
|
649
1549
|
}
|
|
650
1550
|
}
|
|
1551
|
+
// ---------------------------------------------------------------------------
|
|
1552
|
+
// Fixture helpers
|
|
1553
|
+
// ---------------------------------------------------------------------------
|
|
1554
|
+
/**
 * Interactively offer to save a test-run fixture for future offline runs.
 *
 * Prompts the user (default: yes); on confirmation, persists the workflow
 * plus a trimmed execution snapshot via the fixture manager. Save failures
 * are reported as warnings, never thrown.
 *
 * @param {object} fixtureManager - Manager whose `save()` persists the fixture.
 * @param {string} workflowId - Workflow ID, used as the fixture file key.
 * @param {string} workflowName - Human-readable workflow name.
 * @param {object} finalWorkflow - Workflow definition as it stood after the test run.
 * @param {object|undefined} lastExecution - Last execution record, if any.
 * @returns {Promise<void>}
 */
async offerSaveFixture(fixtureManager, workflowId, workflowName, finalWorkflow, lastExecution) {
    const { saveFixture } = await inquirer.prompt([{
            type: 'confirm',
            name: 'saveFixture',
            message: 'Save fixture for future offline runs?',
            default: true,
        }]);
    if (!saveFixture)
        return;
    // Keep only the execution fields the offline runner needs (id/status/
    // startedAt plus resultData error & runData); without a real execution,
    // record a synthetic empty success.
    const executionData = lastExecution
        ? {
            id: lastExecution.id,
            status: lastExecution.status,
            startedAt: lastExecution.startedAt,
            data: {
                resultData: {
                    error: lastExecution.data?.resultData?.error ?? null,
                    runData: lastExecution.data?.resultData?.runData ?? {},
                },
            },
        }
        : { status: 'success', data: { resultData: { runData: {} } } };
    try {
        await fixtureManager.save({
            version: '1.0',
            capturedAt: new Date().toISOString(),
            workflowId,
            workflowName,
            workflow: finalWorkflow,
            execution: executionData,
        });
        this.log(theme.success(`Fixture saved to .n8m/fixtures/${workflowId}.json`));
    }
    catch (e) {
        // Best-effort: a failed fixture save must not fail the test command.
        this.log(theme.warn(`Could not save fixture: ${e.message}`));
    }
}
|
|
1591
|
+
async testWithFixture(fixture, workflowName, aiService) {
|
|
1592
|
+
this.log(theme.info(`Running offline with fixture data (no n8n API calls).`));
|
|
1593
|
+
const currentWorkflow = JSON.parse(JSON.stringify(fixture.workflow));
|
|
1594
|
+
const execution = fixture.execution;
|
|
1595
|
+
const runData = execution.data?.resultData?.runData ?? {};
|
|
1596
|
+
const validationErrors = [];
|
|
1597
|
+
// Extract error from fixture execution (mirrors live loop logic)
|
|
1598
|
+
let fixtureError = null;
|
|
1599
|
+
if (execution.status !== 'success') {
|
|
1600
|
+
const execError = execution.data?.resultData?.error;
|
|
1601
|
+
const nodeRef = execError?.node;
|
|
1602
|
+
let failingNode = typeof nodeRef === 'string' ? nodeRef : nodeRef?.name ?? nodeRef?.type;
|
|
1603
|
+
let rawMsg = execError?.message ?? '';
|
|
1604
|
+
const topDesc = execError?.description ?? execError?.cause?.message;
|
|
1605
|
+
if (rawMsg && topDesc && !rawMsg.includes(topDesc))
|
|
1606
|
+
rawMsg = `${rawMsg} — ${topDesc}`;
|
|
1607
|
+
if (!rawMsg) {
|
|
1608
|
+
outer: for (const [nodeName, nodeRuns] of Object.entries(runData)) {
|
|
1609
|
+
for (const run of nodeRuns) {
|
|
1610
|
+
if (run?.error?.message) {
|
|
1611
|
+
failingNode = failingNode ?? nodeName;
|
|
1612
|
+
rawMsg = run.error.message;
|
|
1613
|
+
const desc = run.error.description ?? run.error.cause?.message;
|
|
1614
|
+
if (desc && !rawMsg.includes(desc))
|
|
1615
|
+
rawMsg = `${rawMsg} — ${desc}`;
|
|
1616
|
+
break outer;
|
|
1617
|
+
}
|
|
1618
|
+
}
|
|
1619
|
+
}
|
|
1620
|
+
}
|
|
1621
|
+
fixtureError = rawMsg
|
|
1622
|
+
? (failingNode ? `[${failingNode}] ${rawMsg}` : rawMsg)
|
|
1623
|
+
: null;
|
|
1624
|
+
}
|
|
1625
|
+
if (!fixtureError) {
|
|
1626
|
+
this.log(theme.done('Offline fixture: execution was successful.'));
|
|
1627
|
+
return { passed: true, errors: [], finalWorkflow: currentWorkflow };
|
|
1628
|
+
}
|
|
1629
|
+
this.log(theme.agent(`Fixture captured a failure: ${fixtureError}`));
|
|
1630
|
+
const lastError = fixtureError;
|
|
1631
|
+
let scenarioPassed = false;
|
|
1632
|
+
for (let round = 0; round < 5; round++) {
|
|
1633
|
+
this.log(theme.agent(`Offline healing round ${round + 1}: ${lastError}`));
|
|
1634
|
+
const failNodeMatch = lastError.match(/^\[([^\]]+)\]/);
|
|
1635
|
+
const failNodeName = failNodeMatch?.[1];
|
|
1636
|
+
const failingNode = failNodeName
|
|
1637
|
+
? currentWorkflow.nodes.find((n) => n.name === failNodeName)
|
|
1638
|
+
: null;
|
|
1639
|
+
const failingNodeCode = (failingNode?.type === 'n8n-nodes-base.code' && failingNode?.parameters?.jsCode)
|
|
1640
|
+
? failingNode.parameters.jsCode
|
|
1641
|
+
: undefined;
|
|
1642
|
+
const evaluation = await aiService.evaluateTestError(lastError, currentWorkflow.nodes, failNodeName, failingNodeCode);
|
|
1643
|
+
if (evaluation.action === 'structural_pass') {
|
|
1644
|
+
this.log(theme.warn(`Structural pass: ${evaluation.reason}`));
|
|
1645
|
+
scenarioPassed = true;
|
|
1646
|
+
break;
|
|
1647
|
+
}
|
|
1648
|
+
if (evaluation.action === 'escalate') {
|
|
1649
|
+
validationErrors.push(lastError);
|
|
1650
|
+
this.log(theme.fail(`Escalated: ${evaluation.reason}`));
|
|
1651
|
+
break;
|
|
1652
|
+
}
|
|
1653
|
+
if (evaluation.action === 'regenerate_payload') {
|
|
1654
|
+
// Cannot re-run offline — treat as structural pass
|
|
1655
|
+
this.log(theme.warn('Offline: cannot regenerate payload without live execution. Treating as structural pass.'));
|
|
1656
|
+
scenarioPassed = true;
|
|
1657
|
+
break;
|
|
1658
|
+
}
|
|
1659
|
+
if (evaluation.action === 'fix_node') {
|
|
1660
|
+
const targetName = evaluation.targetNodeName ?? failNodeName;
|
|
1661
|
+
if (evaluation.nodeFixType === 'code_node_js') {
|
|
1662
|
+
const target = currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.code' && (!targetName || n.name === targetName)) ?? currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.code');
|
|
1663
|
+
if (target?.parameters?.jsCode) {
|
|
1664
|
+
this.log(theme.agent(`Offline fix: rewriting Code node "${target.name}"...`));
|
|
1665
|
+
const fixedCode = await aiService.fixCodeNodeJavaScript(target.parameters.jsCode, lastError);
|
|
1666
|
+
const predecessorName = this.findPredecessorNode(target.name, currentWorkflow.connections);
|
|
1667
|
+
const inputItems = predecessorName ? (runData[predecessorName] ?? []) : [];
|
|
1668
|
+
const verdict = await aiService.evaluateCodeFixOffline(fixedCode, inputItems, lastError, 'code_node_js');
|
|
1669
|
+
this.log(theme.muted(`Offline eval: ${verdict.wouldPass ? 'PASS' : 'FAIL'} — ${verdict.reason}`));
|
|
1670
|
+
target.parameters.jsCode = fixedCode;
|
|
1671
|
+
this.log(verdict.wouldPass
|
|
1672
|
+
? theme.done(`Offline fix validated: "${target.name}" would succeed.`)
|
|
1673
|
+
: theme.warn(`Fix applied to "${target.name}" — cannot fully verify offline. Treating as structural pass.`));
|
|
1674
|
+
scenarioPassed = true;
|
|
1675
|
+
break;
|
|
1676
|
+
}
|
|
1677
|
+
}
|
|
1678
|
+
if (evaluation.nodeFixType === 'execute_command') {
|
|
1679
|
+
const target = currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.executeCommand' && (!targetName || n.name === targetName)) ?? currentWorkflow.nodes.find((n) => n.type === 'n8n-nodes-base.executeCommand');
|
|
1680
|
+
if (target?.parameters?.command) {
|
|
1681
|
+
this.log(theme.agent(`Offline fix: rewriting Execute Command script in "${target.name}"...`));
|
|
1682
|
+
const fixedCmd = await aiService.fixExecuteCommandScript(target.parameters.command, lastError);
|
|
1683
|
+
const predecessorName = this.findPredecessorNode(target.name, currentWorkflow.connections);
|
|
1684
|
+
const inputItems = predecessorName ? (runData[predecessorName] ?? []) : [];
|
|
1685
|
+
const verdict = await aiService.evaluateCodeFixOffline(fixedCmd, inputItems, lastError, 'execute_command');
|
|
1686
|
+
this.log(theme.muted(`Offline eval: ${verdict.wouldPass ? 'PASS' : 'FAIL'} — ${verdict.reason}`));
|
|
1687
|
+
target.parameters.command = fixedCmd;
|
|
1688
|
+
scenarioPassed = true;
|
|
1689
|
+
break;
|
|
1690
|
+
}
|
|
1691
|
+
}
|
|
1692
|
+
if (evaluation.nodeFixType === 'binary_field') {
|
|
1693
|
+
const target = targetName
|
|
1694
|
+
? currentWorkflow.nodes.find((n) => n.name === targetName)
|
|
1695
|
+
: null;
|
|
1696
|
+
this.log(theme.agent(`Offline fix: tracing binary field for "${targetName ?? failNodeName}"...`));
|
|
1697
|
+
const correctField = await aiService.inferBinaryFieldNameFromWorkflow(targetName ?? failNodeName ?? 'unknown', currentWorkflow.nodes, currentWorkflow.connections ?? {});
|
|
1698
|
+
const fieldMatch = lastError.match(/has no binary field ['"]?(\w+)['"]?/i);
|
|
1699
|
+
const expectedField = fieldMatch?.[1];
|
|
1700
|
+
if (target && expectedField && correctField && correctField !== expectedField) {
|
|
1701
|
+
const paramKey = Object.entries(target.parameters || {})
|
|
1702
|
+
.find(([, v]) => typeof v === 'string' && v === expectedField)?.[0];
|
|
1703
|
+
if (paramKey) {
|
|
1704
|
+
target.parameters[paramKey] = correctField;
|
|
1705
|
+
this.log(theme.muted(`Binary field: '${expectedField}' → '${correctField}'`));
|
|
1706
|
+
}
|
|
1707
|
+
}
|
|
1708
|
+
this.log(theme.warn('Binary data cannot be verified offline. Treating as structural pass.'));
|
|
1709
|
+
scenarioPassed = true;
|
|
1710
|
+
break;
|
|
1711
|
+
}
|
|
1712
|
+
// fix_node but no matching node found
|
|
1713
|
+
this.log(theme.warn('No fixable node found offline. Treating as structural pass.'));
|
|
1714
|
+
scenarioPassed = true;
|
|
1715
|
+
break;
|
|
1716
|
+
}
|
|
1717
|
+
break;
|
|
1718
|
+
}
|
|
1719
|
+
if (!scenarioPassed) {
|
|
1720
|
+
validationErrors.push(`Offline test failed: ${lastError}`);
|
|
1721
|
+
}
|
|
1722
|
+
return {
|
|
1723
|
+
passed: validationErrors.length === 0,
|
|
1724
|
+
errors: validationErrors,
|
|
1725
|
+
finalWorkflow: currentWorkflow,
|
|
1726
|
+
};
|
|
1727
|
+
}
|
|
1728
|
+
findPredecessorNode(nodeName, connections) {
|
|
1729
|
+
for (const [sourceName, conns] of Object.entries(connections || {})) {
|
|
1730
|
+
const mainConns = conns?.main ?? [];
|
|
1731
|
+
for (const group of mainConns) {
|
|
1732
|
+
for (const c of (group || [])) {
|
|
1733
|
+
if (c?.node === nodeName)
|
|
1734
|
+
return sourceName;
|
|
1735
|
+
}
|
|
1736
|
+
}
|
|
1737
|
+
}
|
|
1738
|
+
return null;
|
|
1739
|
+
}
|
|
651
1740
|
}
|