@lhi/n8m 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +247 -0
- package/bin/dev.js +5 -0
- package/bin/run.js +6 -0
- package/dist/agentic/checkpointer.d.ts +2 -0
- package/dist/agentic/checkpointer.js +14 -0
- package/dist/agentic/graph.d.ts +483 -0
- package/dist/agentic/graph.js +100 -0
- package/dist/agentic/nodes/architect.d.ts +6 -0
- package/dist/agentic/nodes/architect.js +51 -0
- package/dist/agentic/nodes/engineer.d.ts +11 -0
- package/dist/agentic/nodes/engineer.js +182 -0
- package/dist/agentic/nodes/qa.d.ts +5 -0
- package/dist/agentic/nodes/qa.js +151 -0
- package/dist/agentic/nodes/reviewer.d.ts +5 -0
- package/dist/agentic/nodes/reviewer.js +111 -0
- package/dist/agentic/nodes/supervisor.d.ts +6 -0
- package/dist/agentic/nodes/supervisor.js +18 -0
- package/dist/agentic/state.d.ts +51 -0
- package/dist/agentic/state.js +26 -0
- package/dist/commands/config.d.ts +13 -0
- package/dist/commands/config.js +47 -0
- package/dist/commands/create.d.ts +14 -0
- package/dist/commands/create.js +182 -0
- package/dist/commands/deploy.d.ts +13 -0
- package/dist/commands/deploy.js +68 -0
- package/dist/commands/modify.d.ts +13 -0
- package/dist/commands/modify.js +276 -0
- package/dist/commands/prune.d.ts +9 -0
- package/dist/commands/prune.js +98 -0
- package/dist/commands/resume.d.ts +8 -0
- package/dist/commands/resume.js +39 -0
- package/dist/commands/test.d.ts +27 -0
- package/dist/commands/test.js +619 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/services/ai.service.d.ts +51 -0
- package/dist/services/ai.service.js +421 -0
- package/dist/services/n8n.service.d.ts +17 -0
- package/dist/services/n8n.service.js +81 -0
- package/dist/services/node-definitions.service.d.ts +36 -0
- package/dist/services/node-definitions.service.js +102 -0
- package/dist/utils/config.d.ts +15 -0
- package/dist/utils/config.js +25 -0
- package/dist/utils/multilinePrompt.d.ts +1 -0
- package/dist/utils/multilinePrompt.js +52 -0
- package/dist/utils/n8nClient.d.ts +97 -0
- package/dist/utils/n8nClient.js +440 -0
- package/dist/utils/sandbox.d.ts +13 -0
- package/dist/utils/sandbox.js +34 -0
- package/dist/utils/theme.d.ts +23 -0
- package/dist/utils/theme.js +92 -0
- package/oclif.manifest.json +331 -0
- package/package.json +95 -0
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import { AIService } from "../../services/ai.service.js";
|
|
2
|
+
/**
 * Architect node: turns the user's natural-language goal into a workflow
 * specification via the AI service.
 *
 * Intentionally runs even when `state.workflowJson` already exists
 * (modification/repair flows), so the Engineer can combine the fresh spec
 * with the existing workflow.
 *
 * @param state Team graph state; requires `state.userGoal`.
 * @returns Partial state update: { spec, strategies, needsClarification }.
 * @throws If the user goal is missing or spec generation fails.
 */
export const architectNode = async (state) => {
    const aiService = AIService.getInstance();
    if (!state.userGoal) {
        throw new Error("User goal is missing from state.");
    }
    try {
        const spec = await aiService.generateSpec(state.userGoal);
        // The spec may come back with open questions that need user input
        // before the Engineer can build anything.
        const questions = spec.questions;
        const needsClarification = questions && questions.length > 0;
        // Wrap the single spec as a one-element strategy list so downstream
        // nodes can fan out over alternatives later without an interface
        // change. An AI-generated alternative could be added here (kept off
        // for now to save tokens).
        const strategies = [
            { ...spec, name: "Primary Strategy" },
        ];
        return {
            spec, // Keep backward compatibility for single-path
            strategies,
            needsClarification,
        };
    }
    catch (error) {
        console.error("Architect failed:", error);
        throw error;
    }
};
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import { TeamState } from "../state.js";
|
|
2
|
+
/**
 * Declaration for the Engineer graph node (see engineer.js).
 *
 * The union return type mirrors the three exits of the implementation:
 *  - repair pass:   { workflowJson }             — fixed workflow from QA feedback
 *  - pass-through:  {}                            — workflow already present, no errors
 *  - creation pass: { workflowJson, candidates }  — fresh build plus candidate-pool entry
 */
export declare const engineerNode: (state: typeof TeamState.State) => Promise<{
    workflowJson: any;
    candidates?: undefined;
} | {
    workflowJson?: undefined;
    candidates?: undefined;
} | {
    workflowJson: any;
    candidates: any[];
}>;
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import { AIService } from "../../services/ai.service.js";
|
|
2
|
+
import { NodeDefinitionsService } from "../../services/node-definitions.service.js";
|
|
3
|
+
/**
 * Engineer node: builds (or repairs) the n8n workflow JSON.
 *
 * Three paths, in priority order:
 *  1. Repair — if QA left `validationErrors`, ask the AI service to fix the
 *     current `workflowJson` and return it.
 *  2. Pass-through — a workflow already exists and has no errors: return {}.
 *  3. Creation — generate a new workflow from `state.spec`, grounded with a
 *     RAG context of relevant node schemas.
 *
 * @param state Team graph state.
 * @returns Partial state update (see engineer.d.ts union).
 * @throws If the spec is missing on the creation path, the AI output is not
 *         valid JSON, or the fix/generation call fails.
 */
export const engineerNode = async (state) => {
    const aiService = AIService.getInstance();
    // RAG: Load and Search Node Definitions
    const nodeService = NodeDefinitionsService.getInstance();
    await nodeService.loadDefinitions();
    // Build a flat search query from the goal plus spec metadata.
    const queryText = (state.userGoal + (state.spec ? ` ${state.spec.suggestedName} ${state.spec.description}` : "")).replace(/\n/g, " ");
    // Search for relevant nodes (limit 8 to save context)
    const relevantDefs = nodeService.search(queryText, 8);
    const ragContext = relevantDefs.length > 0
        ? `\n\n[AVAILABLE NODE SCHEMAS - USE THESE EXACT PARAMETERS]\n${nodeService.formatForLLM(relevantDefs)}`
        : "";
    if (relevantDefs.length > 0) {
        console.log(`[Engineer] RAG: Found ${relevantDefs.length} relevant node schemas.`);
    }
    // Self-Correction Loop: QA reported errors, so repair instead of rebuild.
    if (state.validationErrors && state.validationErrors.length > 0) {
        console.log("🔧 Engineer is fixing the workflow based on QA feedback...");
        try {
            // We pass the entire list of errors as context
            const errorContext = state.validationErrors.join('\n');
            // Use the robust fix logic from AIService
            const fixedWorkflow = await aiService.generateWorkflowFix(state.workflowJson, errorContext, undefined, false, state.availableNodeTypes || []);
            return {
                workflowJson: fixedWorkflow,
                // validationErrors will be overwritten by the next QA run
            };
        }
        catch (error) {
            console.error("Engineer failed to fix workflow:", error);
            throw error;
        }
    }
    // Pass-through if workflow exists and no errors (initial pass for existing workflow)
    if (state.workflowJson) {
        return {};
    }
    // Standard Creation Flow
    if (!state.spec) {
        throw new Error("Workflow specification is missing.");
    }
    try {
        const prompt = `You are an n8n Workflow Engineer.
Generate the valid n8n workflow JSON(s) based on the following approved Specification.

Specification:
${JSON.stringify(state.spec, null, 2)}
${ragContext}

IMPORTANT:
1. Descriptive Naming: Name nodes descriptively (e.g. "Fetch Bitcoin Price" instead of "HTTP Request").
2. Multi-Workflow: If the spec requires multiple workflows (e.g. Main + Sub-workflow), generate them all.
3. Linking: If one workflow calls another (using an 'Execute Workflow' node), use the "suggestedName" of the target workflow as the 'workflowId' parameter value. Do NOT use generic IDs like "SUBWORKFLOW_ID".
4. Consistency: Ensure the "name" field in each workflow matches one of the suggestedNames from the spec.
5. Standard Node Types: Use valid n8n-nodes-base types.
- Use "n8n-nodes-base.rssFeedRead" for RSS reading (NOT "rssFeed").
- Use "n8n-nodes-base.httpRequest" for API calls.
- Use "n8n-nodes-base.openAi" for OpenAI.
- Use "n8n-nodes-base.googleGemini" for Google Gemini.
- Use "n8n-nodes-base.htmlExtract" for HTML/Cheerio extraction.
6. Connections Structure: The "connections" object keys MUST BE THE SOURCE NODE NAME. The "node" field inside the connection array MUST BE THE TARGET NODE NAME.
7. Connection Nesting: Ensure the correct n8n connection structure: "SourceNodeName": { "main": [ [ { "node": "TargetNodeName", "type": "main", "index": 0 } ] ] }.

Output a JSON object with this structure:
{
"workflows": [
{ "name": "Workflow Name", "nodes": [...], "connections": {...} }
]
}

Output ONLY valid JSON. No commentary. No markdown.
`;
        // Using AIService just for the LLM call to keep auth logic dry
        const response = await aiService.generateContent(prompt);
        let cleanJson = response || "{}";
        // Strip markdown code fences the model sometimes adds despite instructions.
        cleanJson = cleanJson.replace(/```json\n?|\n?```/g, "").trim();
        let result;
        try {
            result = JSON.parse(cleanJson);
        }
        catch (e) {
            console.error("Failed to parse workflow JSON from spec", e);
            throw new Error("AI generated invalid JSON for workflow from spec");
        }
        // Auto-correct hallucinated node types / malformed connections.
        if (result.workflows && Array.isArray(result.workflows)) {
            result.workflows = result.workflows.map((wf) => fixHallucinatedNodes(wf));
        }
        return {
            workflowJson: result,
            // For parallel execution, push to candidates
            candidates: [result],
        };
    }
    catch (error) {
        console.error("Engineer failed:", error);
        throw error;
    }
};
|
|
102
|
+
/**
 * Auto-correct common n8n node type hallucinations.
 *
 * Known bad type strings produced by the LLM are remapped to their real
 * n8n-nodes-base equivalents, bare (un-namespaced) types receive the
 * "n8n-nodes-base." prefix, and finally the connection structure is
 * normalized via fixN8nConnections. Mutates and returns `workflow`.
 */
function fixHallucinatedNodes(workflow) {
    if (!workflow.nodes || !Array.isArray(workflow.nodes))
        return workflow;
    // LLM-typo → canonical node type.
    const typeAliases = {
        "n8n-nodes-base.rssFeed": "n8n-nodes-base.rssFeedRead",
        "rssFeed": "n8n-nodes-base.rssFeedRead",
        "n8n-nodes-base.gpt": "n8n-nodes-base.openAi",
        "n8n-nodes-base.openai": "n8n-nodes-base.openAi",
        "openai": "n8n-nodes-base.openAi",
        "n8n-nodes-base.openAiChat": "n8n-nodes-base.openAi",
        "n8n-nodes-base.openAIChat": "n8n-nodes-base.openAi",
        "n8n-nodes-base.openaiChat": "n8n-nodes-base.openAi",
        "n8n-nodes-base.gemini": "n8n-nodes-base.googleGemini",
        "n8n-nodes-base.cheerioHtml": "n8n-nodes-base.htmlExtract",
        "cheerioHtml": "n8n-nodes-base.htmlExtract",
        "n8n-nodes-base.schedule": "n8n-nodes-base.scheduleTrigger",
        "schedule": "n8n-nodes-base.scheduleTrigger",
        "n8n-nodes-base.cron": "n8n-nodes-base.scheduleTrigger",
        "n8n-nodes-base.googleCustomSearch": "n8n-nodes-base.googleGemini",
        "googleCustomSearch": "n8n-nodes-base.googleGemini"
    };
    workflow.nodes = workflow.nodes.map((node) => {
        const canonical = node.type ? typeAliases[node.type] : undefined;
        if (canonical) {
            node.type = canonical;
        }
        // A type with no namespace at all (no dot anywhere) gets the base prefix.
        if (node.type && !node.type.includes('.')) {
            node.type = `n8n-nodes-base.${node.type}`;
        }
        return node;
    });
    return fixN8nConnections(workflow);
}
|
|
138
|
+
/**
 * Force-fix connection structure to prevent "object is not iterable" errors.
 *
 * Normalizes each source entry's "main" output into the canonical
 * array-of-arrays-of-{node,type,index} shape n8n expects. Entries without a
 * "main" key are kept as-is; non-object entries are dropped. Mutates and
 * returns `workflow`.
 */
function fixN8nConnections(workflow) {
    if (!workflow.connections || typeof workflow.connections !== 'object')
        return workflow;
    // Coerce one connection entry into { node, type, index } form.
    const normalizeConn = (conn) => {
        if (!conn)
            return { node: 'Unknown', type: 'main', index: 0 };
        if (typeof conn === 'string')
            return { node: conn, type: 'main', index: 0 };
        return {
            node: String(conn.node || 'Unknown'),
            type: conn.type || 'main',
            index: conn.index || 0
        };
    };
    const rebuilt = {};
    for (const source of Object.keys(workflow.connections)) {
        const outputs = workflow.connections[source];
        if (!outputs || typeof outputs !== 'object')
            continue;
        if (!outputs.main) {
            // No "main" output — keep whatever shape was provided.
            rebuilt[source] = outputs;
            continue;
        }
        // A scalar "main" becomes a single one-connection branch.
        const rawMain = Array.isArray(outputs.main)
            ? outputs.main
            : [[{ node: String(outputs.main), type: 'main', index: 0 }]];
        rebuilt[source] = {
            main: rawMain.map((branch) => {
                if (!branch)
                    return [];
                // A bare object/string branch is wrapped without normalization
                // (matches historical behavior).
                return Array.isArray(branch) ? branch.map(normalizeConn) : [branch];
            })
        };
    }
    workflow.connections = rebuilt;
    return workflow;
}
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
import { AIService } from "../../services/ai.service.js";
|
|
2
|
+
import { ConfigManager } from "../../utils/config.js";
|
|
3
|
+
import { N8nClient } from "../../utils/n8nClient.js";
|
|
4
|
+
import { theme } from "../../utils/theme.js";
|
|
5
|
+
/**
 * QA node: end-to-end validation by deploying the candidate workflow to the
 * live n8n instance, triggering it, and inspecting the execution result.
 * The copy is deployed under a "[n8m:test] ..." name and deleted afterwards.
 *
 * Returns { validationStatus: 'passed'|'failed', validationErrors } for the
 * graph router; throws only for missing input/credentials (everything after
 * client construction is caught and reported as a validation failure).
 */
export const qaNode = async (state) => {
    const aiService = AIService.getInstance();
    const workflowJson = state.workflowJson;
    if (!workflowJson) {
        throw new Error("No workflow JSON found in state to test.");
    }
    // 1. Load Credentials (saved config wins over environment variables)
    const config = await ConfigManager.load();
    const n8nUrl = config.n8nUrl || process.env.N8N_API_URL;
    const n8nKey = config.n8nKey || process.env.N8N_API_KEY;
    if (!n8nUrl || !n8nKey) {
        throw new Error('Credentials missing. Configure environment via \'n8m config\'.');
    }
    const client = new N8nClient({ apiUrl: n8nUrl, apiKey: n8nKey });
    let createdWorkflowId = null; // remembered so `finally` can clean up
    const validationErrors = [];
    try {
        // 2. Prepare Workflow Data (Extract from state structure)
        // engineerNode returns { workflowJson: { workflows: [...] } }; unwrap the
        // first workflow when that envelope is present, otherwise use as-is.
        let targetWorkflow = workflowJson;
        if (workflowJson.workflows && Array.isArray(workflowJson.workflows) && workflowJson.workflows.length > 0) {
            targetWorkflow = workflowJson.workflows[0];
        }
        const workflowName = targetWorkflow.name || 'Agentic_Test_Workflow';
        const rootPayload = {
            nodes: targetWorkflow.nodes,
            connections: targetWorkflow.connections,
            settings: targetWorkflow.settings || {},
            staticData: targetWorkflow.staticData || {},
            name: `[n8m:test] ${workflowName}`,
        };
        // Shim trigger if needed (reusing logic from test.ts).
        // CRITICAL: We MUST have a proper Webhook for automated testing to work.
        // Manual Triggers cannot be "activated" via API, and we need activation for validation.
        // So we inject a webhook shim even if a Manual Trigger exists.
        const hasWebhook = rootPayload.nodes.some((n) => n.type === 'n8n-nodes-base.webhook' && !n.disabled);
        if (!hasWebhook) {
            const shimmed = client.injectManualTrigger(rootPayload);
            rootPayload.nodes = shimmed.nodes;
            rootPayload.connections = shimmed.connections;
        }
        // 3. Deploy Ephemeral Workflow
        console.log(theme.agent(`Deploying ephemeral root: ${rootPayload.name}...`));
        const result = await client.createWorkflow(rootPayload.name, rootPayload);
        createdWorkflowId = result.id;
        // 4. Generate Mock Data and fire the trigger
        const webhookNode = rootPayload.nodes.find((n) => n.type === 'n8n-nodes-base.webhook');
        let triggerSuccess = false;
        if (webhookNode) {
            const path = webhookNode.parameters?.path;
            if (path) {
                // Activate for webhook testing (webhooks only register when active)
                await client.activateWorkflow(createdWorkflowId);
                // Give the LLM enough context to fabricate a realistic payload.
                const nodeNames = targetWorkflow.nodes.map((n) => n.name).join(', ');
                const context = `Workflow Name: "${targetWorkflow.name}"
Nodes: ${nodeNames}
Goal: "${state.userGoal}"
Generate a SINGLE JSON object payload that effectively tests this workflow.`;
                const mockPayload = await aiService.generateMockData(context);
                const baseUrl = new URL(n8nUrl).origin;
                const webhookUrl = `${baseUrl}/webhook/${path}`;
                const response = await fetch(webhookUrl, {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify(mockPayload)
                });
                if (response.ok) {
                    triggerSuccess = true;
                }
                else {
                    throw new Error(`Webhook trigger failed with status ${response.status}`);
                }
            }
        }
        else {
            // Just execute if no webhook (manual trigger)
            await client.executeWorkflow(createdWorkflowId);
            triggerSuccess = true;
        }
        // 5. Verify Execution
        // Poll until a recent execution appears (up to ~40s: 20 polls x 2s).
        if (triggerSuccess) {
            const executionStartTime = Date.now();
            let executionFound = false;
            const maxPoll = 20; // shorter poll for agent
            for (let i = 0; i < maxPoll; i++) {
                await new Promise(r => setTimeout(r, 2000));
                const executions = await client.getWorkflowExecutions(createdWorkflowId);
                // 5s grace window: the server's clock may lag our local start time.
                const recentExec = executions.find((e) => new Date(e.startedAt).getTime() > (executionStartTime - 5000));
                if (recentExec) {
                    executionFound = true;
                    const fullExec = await client.getExecution(recentExec.id);
                    if (fullExec.status === 'success') {
                        return {
                            validationStatus: 'passed',
                            validationErrors: [],
                        };
                    }
                    else {
                        const errorMsg = fullExec.data?.resultData?.error?.message || "Unknown flow failure";
                        validationErrors.push(`Execution Failed: ${errorMsg}`);
                        console.log(theme.error(`Execution Failed: ${errorMsg}`));
                        break;
                    }
                }
            }
            if (!executionFound) {
                validationErrors.push("No execution detected after trigger.");
            }
        }
        // 6. Dynamic Tool Execution (Sandbox)
        // If the Agent has defined a custom validation script, run it now.
        // In the future, the QA agent could generate this script on the fly.
        if (state.customTools && state.customTools['validationScript']) {
            console.log("🛠️ QA is running custom validation script...");
            const script = state.customTools['validationScript'];
            const sandboxResult = (await import('../../utils/sandbox.js')).Sandbox.run(script, {
                workflowJson,
                validationErrors
            });
            if (sandboxResult === false) {
                validationErrors.push("Custom validation script failed.");
            }
        }
    }
    catch (error) {
        const errorMsg = error.message;
        console.error(theme.error(`QA Node Error: ${errorMsg}`));
        validationErrors.push(errorMsg);
    }
    finally {
        // Cleanup: best-effort delete of the ephemeral workflow.
        if (createdWorkflowId) {
            try {
                await client.deleteWorkflow(createdWorkflowId);
                console.log(theme.info(`Purged temporary workflow ${createdWorkflowId}`));
            }
            catch { /* intentionally empty */ }
        }
    }
    // Reaching here means the run did not early-return with 'passed'.
    return {
        validationStatus: 'failed',
        validationErrors,
    };
};
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import { theme } from "../../utils/theme.js";
|
|
2
|
+
/**
 * Reviewer node: fast, offline (static) validation of the generated workflow.
 *
 * Checks for: known hallucinated node types, empty node names, types missing
 * from the live instance (when `state.availableNodeTypes` is provided),
 * orphaned non-trigger nodes, and Execute Workflow nodes without a target.
 *
 * @param state Team graph state; reads `workflowJson` and `availableNodeTypes`.
 * @returns { validationStatus: 'passed'|'failed', validationErrors }.
 */
export const reviewerNode = async (state) => {
    const workflowJson = state.workflowJson;
    const validationErrors = [];
    if (!workflowJson) {
        return {
            validationStatus: 'failed',
            validationErrors: ["No workflow JSON found to review."]
        };
    }
    // Unwrap the { workflows: [...] } envelope the Engineer may produce.
    let nodes = [];
    let targetWorkflow = workflowJson;
    if (workflowJson.workflows && Array.isArray(workflowJson.workflows) && workflowJson.workflows.length > 0) {
        targetWorkflow = workflowJson.workflows[0];
    }
    if (targetWorkflow.nodes && Array.isArray(targetWorkflow.nodes)) {
        nodes = targetWorkflow.nodes;
    }
    // 1. Check for hallucinated node types
    const knownHallucinations = [
        "rssFeed", "n8n-nodes-base.rssFeed",
        "n8n-nodes-base.gpt", "n8n-nodes-base.openai", "openai",
        "n8n-nodes-base.gemini",
        "cheerioHtml", "n8n-nodes-base.cheerioHtml"
    ];
    nodes.forEach(node => {
        // Guard: LLM output may omit `type`/`name` entirely.
        const nodeType = node.type || "";
        if (knownHallucinations.includes(nodeType)) {
            console.log(theme.warn(`[Reviewer] Detected hallucinated node type: ${node.type}`));
            validationErrors.push(`Hallucinated node type detected: "${node.type}". Use standard n8n-nodes-base types.`);
        }
        // Check for empty names
        if (!node.name || node.name.trim() === "") {
            validationErrors.push(`Node with type "${node.type}" has an empty name.`);
        }
        // 1.1 Strict Type Check (if available)
        if (state.availableNodeTypes && state.availableNodeTypes.length > 0) {
            if (!state.availableNodeTypes.includes(nodeType)) {
                // Bypass for sticky notes etc. which may not be listed by the API
                if (!nodeType.startsWith('n8n-nodes-base.stick')) {
                    console.log(theme.warn(`[Reviewer] Node type not found in instance: ${node.type}`));
                    validationErrors.push(`Node type "${node.type}" is not available on your n8n instance.`);
                }
            }
        }
    });
    // 2. Check for disconnected nodes (Orphans)
    // Build the set of nodes that appear as a connection target anywhere.
    const destinations = new Set();
    const connections = targetWorkflow.connections || {};
    for (const sourceNode in connections) {
        const outputConfig = connections[sourceNode];
        // Guard malformed connection entries instead of crashing.
        if (!outputConfig || typeof outputConfig !== 'object')
            continue;
        // iterate over outputs (main, ai_tool, etc)
        for (const outputType in outputConfig) {
            const routes = outputConfig[outputType];
            if (!Array.isArray(routes))
                continue; // guard: output value must be the n8n array-of-arrays shape
            routes.forEach((route) => {
                if (!Array.isArray(route))
                    return;
                route.forEach((connection) => {
                    if (connection && connection.node) {
                        destinations.add(connection.node);
                    }
                });
            });
        }
    }
    // Triggers legitimately have no incoming connections; detect them broadly
    // by type keyword plus the generic start/poll core types.
    const isTrigger = (type) => {
        const lower = (type || "").toLowerCase();
        return lower.includes('trigger') ||
            lower.includes('webhook') ||
            lower.includes('n8n-nodes-base.start') ||
            lower.includes('n8n-nodes-base.poll');
    };
    nodes.forEach(node => {
        const nodeType = node.type || "";
        const nodeName = node.name || "";
        // 2.1 Orphan Check
        if (!destinations.has(node.name) && !isTrigger(nodeType)) {
            // BUGFIX: the real type is "n8n-nodes-base.stickyNote" (lowercase 's'),
            // so the old case-sensitive includes('StickyNote') never matched and
            // sticky notes were always flagged as orphans. Compare case-insensitively.
            if (!nodeType.toLowerCase().includes('stickynote')) {
                // Double check for "On Execution" (custom trigger name sometimes used)
                if (!nodeName.toLowerCase().includes('trigger') && !nodeName.toLowerCase().includes('webhook')) {
                    console.log(theme.warn(`[Reviewer] Validated disconnection: Node "${node.name}" has no incoming connections.`));
                    validationErrors.push(`Node "${node.name}" (${node.type}) is disconnected (orphaned). Connect it or remove it.`);
                }
            }
        }
        // 2.2 Sub-Workflow Validation
        if (nodeType === 'n8n-nodes-base.executeWorkflow') {
            const workflowId = node.parameters?.workflowId;
            const mode = node.parameters?.mode || 'id'; // default is often ID
            if (!workflowId && mode === 'id') {
                validationErrors.push(`Node "${node.name}" (Execute Workflow) is missing a 'workflowId' parameter.`);
            }
        }
    });
    if (validationErrors.length > 0) {
        return {
            validationStatus: 'failed',
            validationErrors: validationErrors,
        };
    }
    return {
        validationStatus: 'passed',
        // Clear errors from previous runs
        validationErrors: []
    };
};
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import { theme } from "../../utils/theme.js";
|
|
2
|
+
/**
 * Supervisor node: picks the winning workflow out of the candidate pool and
 * promotes it to the canonical `workflowJson` for the rest of the graph (QA
 * etc.). With an empty pool it returns {} so any existing workflowJson stays.
 */
export const supervisorNode = async (state) => {
    const pool = state.candidates;
    if (!pool || pool.length === 0) {
        // Nothing to choose from — fall back to whatever is already in state.
        return {};
    }
    console.log(theme.agent(`Supervisor found ${pool.length} candidates.`));
    // Selection heuristic: first candidate wins. An LLM-based ranking of the
    // candidates could replace this line without changing the interface.
    const winner = pool[0];
    console.log(theme.success(`Supervisor selected: ${winner.name || "Unnamed Workflow"}`));
    return {
        workflowJson: winner
    };
};
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import { BaseMessage } from "@langchain/core/messages";
|
|
2
|
+
/**
 * Generated type declaration for the shared agent-team state (see state.js).
 * Each property is a LangGraph state channel: the function-typed entries are
 * last-value channels, while `BinaryOperatorAggregate` entries use a reducer
 * (concat for lists, merge for records).
 */
export declare const TeamState: import("@langchain/langgraph").AnnotationRoot<{
    // Natural-language goal supplied by the user.
    userGoal: {
        (): import("@langchain/langgraph").LastValue<string>;
        (annotation: import("@langchain/langgraph").SingleReducer<string, string>): import("@langchain/langgraph").BinaryOperatorAggregate<string, string>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Workflow specification produced by the architect node.
    spec: {
        (): import("@langchain/langgraph").LastValue<any>;
        (annotation: import("@langchain/langgraph").SingleReducer<any, any>): import("@langchain/langgraph").BinaryOperatorAggregate<any, any>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Current workflow JSON (single object or a { workflows: [...] } envelope).
    workflowJson: {
        (): import("@langchain/langgraph").LastValue<any>;
        (annotation: import("@langchain/langgraph").SingleReducer<any, any>): import("@langchain/langgraph").BinaryOperatorAggregate<any, any>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Errors reported by reviewer/QA; consumed by the engineer's repair pass.
    validationErrors: {
        (): import("@langchain/langgraph").LastValue<string[]>;
        (annotation: import("@langchain/langgraph").SingleReducer<string[], string[]>): import("@langchain/langgraph").BinaryOperatorAggregate<string[], string[]>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Conversation history; concat reducer (see state.js).
    messages: import("@langchain/langgraph").BinaryOperatorAggregate<BaseMessage<import("@langchain/core/messages").MessageStructure<import("@langchain/core/messages").MessageToolSet>, import("@langchain/core/messages").MessageType>[], BaseMessage<import("@langchain/core/messages").MessageStructure<import("@langchain/core/messages").MessageToolSet>, import("@langchain/core/messages").MessageType>[]>;
    // Set by the architect when the spec contains open questions.
    needsClarification: {
        (): import("@langchain/langgraph").LastValue<boolean>;
        (annotation: import("@langchain/langgraph").SingleReducer<boolean, boolean>): import("@langchain/langgraph").BinaryOperatorAggregate<boolean, boolean>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Outcome of the latest reviewer/QA run.
    validationStatus: {
        (): import("@langchain/langgraph").LastValue<"passed" | "failed">;
        (annotation: import("@langchain/langgraph").SingleReducer<"passed" | "failed", "passed" | "failed">): import("@langchain/langgraph").BinaryOperatorAggregate<"passed" | "failed", "passed" | "failed">;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Node types reported by the live n8n instance (for strict type checks).
    availableNodeTypes: {
        (): import("@langchain/langgraph").LastValue<string[]>;
        (annotation: import("@langchain/langgraph").SingleReducer<string[], string[]>): import("@langchain/langgraph").BinaryOperatorAggregate<string[], string[]>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Number of fix/retry iterations performed so far.
    revisionCount: {
        (): import("@langchain/langgraph").LastValue<number>;
        (annotation: import("@langchain/langgraph").SingleReducer<number, number>): import("@langchain/langgraph").BinaryOperatorAggregate<number, number>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Alternative strategies produced by the architect (parallel execution support).
    strategies: {
        (): import("@langchain/langgraph").LastValue<any[]>;
        (annotation: import("@langchain/langgraph").SingleReducer<any[], any[]>): import("@langchain/langgraph").BinaryOperatorAggregate<any[], any[]>;
        Root: <S extends import("@langchain/langgraph").StateDefinition>(sd: S) => import("@langchain/langgraph").AnnotationRoot<S>;
    };
    // Candidate workflows; concat reducer so parallel engineers can append.
    candidates: import("@langchain/langgraph").BinaryOperatorAggregate<any[], any[]>;
    // Dynamic tool scripts keyed by name; merge reducer.
    customTools: import("@langchain/langgraph").BinaryOperatorAggregate<Record<string, string>, Record<string, string>>;
}>;
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import { Annotation } from "@langchain/langgraph";
|
|
2
|
+
/**
 * Shared LangGraph state for the agent team (architect / engineer / reviewer /
 * QA / supervisor). Bare `(Annotation)` entries are last-value channels
 * (compiled output of `Annotation<T>`); the explicit entries define reducers.
 */
export const TeamState = Annotation.Root({
    // Natural-language goal supplied by the user.
    userGoal: (Annotation),
    // Specification produced by the architect node.
    spec: (Annotation),
    // Current workflow JSON (single object or { workflows: [...] } envelope).
    workflowJson: (Annotation),
    // Errors from reviewer/QA; consumed by the engineer's repair pass.
    validationErrors: (Annotation),
    // Conversation history: appended across steps rather than replaced.
    messages: Annotation({
        reducer: (x, y) => x.concat(y),
        default: () => [],
    }),
    // True when the architect's spec contains open questions for the user.
    needsClarification: (Annotation),
    // 'passed' | 'failed' outcome of the latest validation run.
    validationStatus: (Annotation),
    // Node types reported by the live n8n instance (for strict type checks).
    availableNodeTypes: (Annotation),
    // Number of fix/retry iterations performed so far.
    revisionCount: (Annotation),
    // Parallel Execution Support
    strategies: (Annotation),
    // Candidate workflows: concat reducer so parallel engineers can append.
    candidates: Annotation({
        reducer: (x, y) => x.concat(y),
        default: () => [],
    }),
    // Dynamic Tools: scripts keyed by name, merged across updates.
    customTools: Annotation({
        reducer: (x, y) => ({ ...x, ...y }),
        default: () => ({}),
    }),
});
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { Command } from '@oclif/core';
|
|
2
|
+
/**
 * Declaration for the `n8m config` oclif command, which stores the n8n
 * connection and AI-provider settings used by the rest of the CLI.
 */
export default class Config extends Command {
    static description: string;
    // CLI flags: n8n connection (url/key) plus AI provider configuration.
    static flags: {
        'n8n-url': import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
        'n8n-key': import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
        'ai-key': import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
        'ai-provider': import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
        'ai-model': import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
        'ai-base-url': import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
    };
    run(): Promise<void>;
}
|