@prompd/cli 0.4.9 → 0.4.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/nodeTypeRegistry.d.ts.map +1 -1
- package/dist/lib/nodeTypeRegistry.js +5 -1
- package/dist/lib/nodeTypeRegistry.js.map +1 -1
- package/dist/lib/workflowExecutor.d.ts +12 -1
- package/dist/lib/workflowExecutor.d.ts.map +1 -1
- package/dist/lib/workflowExecutor.js +747 -294
- package/dist/lib/workflowExecutor.js.map +1 -1
- package/dist/lib/workflowParser.d.ts.map +1 -1
- package/dist/lib/workflowParser.js +2 -0
- package/dist/lib/workflowParser.js.map +1 -1
- package/dist/lib/workflowTypes.d.ts +17 -1
- package/dist/lib/workflowTypes.d.ts.map +1 -1
- package/dist/lib/workflowTypes.js.map +1 -1
- package/package.json +1 -1
|
@@ -3,6 +3,9 @@
|
|
|
3
3
|
* WorkflowExecutor - Execute workflow graphs with sequential, conditional, and parallel support
|
|
4
4
|
* Includes comprehensive execution tracing, debugging, and step-through capabilities
|
|
5
5
|
*/
|
|
6
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
7
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
8
|
+
};
|
|
6
9
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
7
10
|
exports.executeWorkflow = executeWorkflow;
|
|
8
11
|
exports.createPromptExecutor = createPromptExecutor;
|
|
@@ -11,6 +14,11 @@ exports.exportTraceAsJson = exportTraceAsJson;
|
|
|
11
14
|
exports.downloadTrace = downloadTrace;
|
|
12
15
|
exports.formatTraceEntry = formatTraceEntry;
|
|
13
16
|
exports.getTraceSummary = getTraceSummary;
|
|
17
|
+
const child_process_1 = require("child_process");
|
|
18
|
+
const fs_1 = require("fs");
|
|
19
|
+
const os_1 = require("os");
|
|
20
|
+
const path_1 = require("path");
|
|
21
|
+
const vm_1 = __importDefault(require("vm"));
|
|
14
22
|
const workflowParser_1 = require("./workflowParser");
|
|
15
23
|
const memoryBackend_1 = require("./memoryBackend");
|
|
16
24
|
/**
|
|
@@ -639,8 +647,29 @@ async function executeWorkflow(workflow, params, options = {}) {
|
|
|
639
647
|
message: `Starting workflow execution with ${executionOrder.length} nodes`,
|
|
640
648
|
data: { executionOrder, mode: executionMode },
|
|
641
649
|
}, options);
|
|
642
|
-
// Track previous output for auto-inject
|
|
650
|
+
// Track previous output for auto-inject (fallback for nodes with no incoming edges)
|
|
643
651
|
let previousOutput = undefined;
|
|
652
|
+
// Build a predecessor map: for each node, find its graph predecessor(s) from incoming execution edges.
|
|
653
|
+
// This ensures previous_output resolves to the correct upstream node's output,
|
|
654
|
+
// not just the chronologically last-executed node (which breaks parallel branches).
|
|
655
|
+
const predecessorMap = new Map();
|
|
656
|
+
for (const edge of workflowFile.edges) {
|
|
657
|
+
// Only consider execution flow edges (same filtering as topological sort)
|
|
658
|
+
if (edge.sourceHandle === 'loop-end' || edge.sourceHandle === 'parallel-end')
|
|
659
|
+
continue;
|
|
660
|
+
if (edge.targetHandle && edge.targetHandle.startsWith('fork-'))
|
|
661
|
+
continue;
|
|
662
|
+
const eventBasedHandles = ['onError', 'onCheckpoint', 'onProgress', 'toolResult'];
|
|
663
|
+
if (edge.sourceHandle && eventBasedHandles.includes(edge.sourceHandle))
|
|
664
|
+
continue;
|
|
665
|
+
// Skip merge input handles — merge nodes have their own edge-based collection logic
|
|
666
|
+
if (edge.targetHandle && edge.targetHandle.startsWith('input-'))
|
|
667
|
+
continue;
|
|
668
|
+
if (!predecessorMap.has(edge.target)) {
|
|
669
|
+
predecessorMap.set(edge.target, []);
|
|
670
|
+
}
|
|
671
|
+
predecessorMap.get(edge.target).push(edge.source);
|
|
672
|
+
}
|
|
644
673
|
// Track which nodes should be skipped due to condition branching
|
|
645
674
|
// When a condition node evaluates, only the target branch should execute
|
|
646
675
|
const skippedNodes = new Set();
|
|
@@ -771,247 +800,161 @@ async function executeWorkflow(workflow, params, options = {}) {
|
|
|
771
800
|
state.nodeStates[nodeId].status = 'running';
|
|
772
801
|
state.nodeStates[nodeId].startTime = nodeStartTime;
|
|
773
802
|
options.onProgress?.(deepClone(state));
|
|
803
|
+
// Resolve previous_output from graph predecessor(s) instead of chronological last-executed node.
|
|
804
|
+
// This ensures nodes in parallel branches get the correct upstream output.
|
|
805
|
+
let resolvedPreviousOutput = previousOutput;
|
|
806
|
+
const predecessors = predecessorMap.get(nodeId);
|
|
807
|
+
if (predecessors && predecessors.length > 0) {
|
|
808
|
+
// Use the first predecessor that has a completed output
|
|
809
|
+
for (const predId of predecessors) {
|
|
810
|
+
if (state.nodeOutputs[predId] !== undefined) {
|
|
811
|
+
resolvedPreviousOutput = state.nodeOutputs[predId];
|
|
812
|
+
break;
|
|
813
|
+
}
|
|
814
|
+
}
|
|
815
|
+
}
|
|
774
816
|
// Build context for expression evaluation
|
|
775
817
|
const context = {
|
|
776
818
|
nodeOutputs: state.nodeOutputs,
|
|
777
819
|
variables: state.variables,
|
|
778
820
|
workflow: params,
|
|
779
|
-
previous_output:
|
|
821
|
+
previous_output: resolvedPreviousOutput,
|
|
780
822
|
};
|
|
781
|
-
// Execute
|
|
782
|
-
let output;
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
const
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
const mergeNodes = findMergeNodesDownstream(nodeId, workflowFile);
|
|
802
|
-
// For each non-selected branch, mark downstream nodes as skipped
|
|
803
|
-
for (const [handleId, targetNodeId] of conditionInfo.edgeTargets) {
|
|
804
|
-
if (handleId !== selectedHandle) {
|
|
805
|
-
// Get all nodes downstream of this non-selected branch
|
|
806
|
-
// Stop at merge nodes since those are where branches converge
|
|
807
|
-
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
808
|
-
for (const skipNodeId of downstreamNodes) {
|
|
809
|
-
// Don't skip merge nodes - they may receive input from the selected branch
|
|
810
|
-
if (!mergeNodes.has(skipNodeId)) {
|
|
811
|
-
skippedNodes.add(skipNodeId);
|
|
812
|
-
}
|
|
823
|
+
// Execute node via shared dispatch (single source of truth for node type -> handler mapping)
|
|
824
|
+
let output = await dispatchNode(node, context, options, state, workflowFile, trace, memoryBackend, { executionOrder, branchingTargetMap, skippedNodes });
|
|
825
|
+
// ── Post-dispatch branching logic ──
|
|
826
|
+
// Certain node types produce branching outputs that determine which downstream
|
|
827
|
+
// paths should be skipped. This logic is main-loop-only (not needed in subset execution).
|
|
828
|
+
if (node.type === 'condition') {
|
|
829
|
+
const conditionOutput = output;
|
|
830
|
+
const conditionInfo = branchingTargetMap.get(nodeId);
|
|
831
|
+
if (conditionInfo) {
|
|
832
|
+
const selectedHandle = conditionOutput.branch === 'default'
|
|
833
|
+
? 'default'
|
|
834
|
+
: `condition-${conditionOutput.branch}`;
|
|
835
|
+
const selectedTarget = conditionInfo.edgeTargets.get(selectedHandle);
|
|
836
|
+
const mergeNodes = findMergeNodesDownstream(nodeId, workflowFile);
|
|
837
|
+
for (const [handleId, targetNodeId] of conditionInfo.edgeTargets) {
|
|
838
|
+
if (handleId !== selectedHandle) {
|
|
839
|
+
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
840
|
+
for (const skipNodeId of downstreamNodes) {
|
|
841
|
+
if (!mergeNodes.has(skipNodeId)) {
|
|
842
|
+
skippedNodes.add(skipNodeId);
|
|
813
843
|
}
|
|
814
|
-
addTraceEntry(trace, {
|
|
815
|
-
type: 'debug_step',
|
|
816
|
-
nodeId,
|
|
817
|
-
message: `Skipping branch '${handleId}' (not selected), marking ${downstreamNodes.size} downstream nodes`,
|
|
818
|
-
data: { handleId, targetNodeId, skippedCount: downstreamNodes.size },
|
|
819
|
-
}, options);
|
|
820
844
|
}
|
|
845
|
+
addTraceEntry(trace, {
|
|
846
|
+
type: 'debug_step',
|
|
847
|
+
nodeId,
|
|
848
|
+
message: `Skipping branch '${handleId}' (not selected), marking ${downstreamNodes.size} downstream nodes`,
|
|
849
|
+
data: { handleId, targetNodeId, skippedCount: downstreamNodes.size },
|
|
850
|
+
}, options);
|
|
821
851
|
}
|
|
822
|
-
addTraceEntry(trace, {
|
|
823
|
-
type: 'expression_eval',
|
|
824
|
-
nodeId,
|
|
825
|
-
nodeName: node.data.label,
|
|
826
|
-
message: `Condition selected branch '${conditionOutput.branch}' -> target '${selectedTarget || 'none'}'`,
|
|
827
|
-
data: { branch: conditionOutput.branch, target: selectedTarget, handle: selectedHandle },
|
|
828
|
-
}, options);
|
|
829
|
-
}
|
|
830
|
-
break;
|
|
831
|
-
}
|
|
832
|
-
case 'loop':
|
|
833
|
-
output = await executeLoopNode(node, context, options, state, workflowFile, trace, memoryBackend);
|
|
834
|
-
break;
|
|
835
|
-
case 'parallel':
|
|
836
|
-
output = await executeParallelNode(node, context, options, state, workflowFile, trace, memoryBackend);
|
|
837
|
-
break;
|
|
838
|
-
case 'merge': {
|
|
839
|
-
const mergeResult = executeMergeNode(node, context, workflowFile, skippedNodes);
|
|
840
|
-
// Check if merge is waiting for more inputs
|
|
841
|
-
if (mergeResult && typeof mergeResult === 'object' && 'waiting' in mergeResult && mergeResult.waiting) {
|
|
842
|
-
// In wait mode, the merge node needs more inputs
|
|
843
|
-
// This shouldn't happen with proper topological ordering, but handle it gracefully
|
|
844
|
-
addTraceEntry(trace, {
|
|
845
|
-
type: 'debug_step',
|
|
846
|
-
nodeId,
|
|
847
|
-
nodeName: node.data.label,
|
|
848
|
-
message: `Merge node waiting for inputs: ${mergeResult.missingInputs.join(', ')}`,
|
|
849
|
-
data: { missingInputs: mergeResult.missingInputs },
|
|
850
|
-
}, options);
|
|
851
|
-
// For now, proceed with empty result - proper handling would require reordering execution
|
|
852
|
-
output = {};
|
|
853
|
-
}
|
|
854
|
-
else {
|
|
855
|
-
output = mergeResult;
|
|
856
852
|
}
|
|
857
|
-
|
|
853
|
+
addTraceEntry(trace, {
|
|
854
|
+
type: 'expression_eval',
|
|
855
|
+
nodeId,
|
|
856
|
+
nodeName: node.data.label,
|
|
857
|
+
message: `Condition selected branch '${conditionOutput.branch}' -> target '${selectedTarget || 'none'}'`,
|
|
858
|
+
data: { branch: conditionOutput.branch, target: selectedTarget, handle: selectedHandle },
|
|
859
|
+
}, options);
|
|
858
860
|
}
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
output = await executeToolNode(node, context, options, trace);
|
|
874
|
-
break;
|
|
875
|
-
case 'tool-call-parser': {
|
|
876
|
-
output = executeToolCallParserNode(node, context, trace, options);
|
|
877
|
-
// Handle branching based on whether a tool call was found
|
|
878
|
-
const parserOutput = output;
|
|
879
|
-
const parserInfo = branchingTargetMap.get(nodeId);
|
|
880
|
-
if (parserInfo) {
|
|
881
|
-
// Select handle based on hasToolCall result
|
|
882
|
-
const selectedHandle = parserOutput.hasToolCall ? 'found' : 'not-found';
|
|
883
|
-
const selectedTarget = parserInfo.edgeTargets.get(selectedHandle);
|
|
884
|
-
// Find merge nodes that act as convergence points
|
|
885
|
-
const mergeNodes = findMergeNodesDownstream(nodeId, workflowFile);
|
|
886
|
-
// For the non-selected branch, mark downstream nodes as skipped
|
|
887
|
-
for (const [handleId, targetNodeId] of parserInfo.edgeTargets) {
|
|
888
|
-
if (handleId !== selectedHandle) {
|
|
889
|
-
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
890
|
-
for (const skipNodeId of downstreamNodes) {
|
|
891
|
-
if (!mergeNodes.has(skipNodeId)) {
|
|
892
|
-
skippedNodes.add(skipNodeId);
|
|
893
|
-
}
|
|
861
|
+
}
|
|
862
|
+
else if (node.type === 'tool-call-parser') {
|
|
863
|
+
const parserOutput = output;
|
|
864
|
+
const parserInfo = branchingTargetMap.get(nodeId);
|
|
865
|
+
if (parserInfo) {
|
|
866
|
+
const selectedHandle = parserOutput.hasToolCall ? 'found' : 'not-found';
|
|
867
|
+
const selectedTarget = parserInfo.edgeTargets.get(selectedHandle);
|
|
868
|
+
const mergeNodes = findMergeNodesDownstream(nodeId, workflowFile);
|
|
869
|
+
for (const [handleId, targetNodeId] of parserInfo.edgeTargets) {
|
|
870
|
+
if (handleId !== selectedHandle) {
|
|
871
|
+
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
872
|
+
for (const skipNodeId of downstreamNodes) {
|
|
873
|
+
if (!mergeNodes.has(skipNodeId)) {
|
|
874
|
+
skippedNodes.add(skipNodeId);
|
|
894
875
|
}
|
|
895
|
-
addTraceEntry(trace, {
|
|
896
|
-
type: 'debug_step',
|
|
897
|
-
nodeId,
|
|
898
|
-
message: `Skipping branch '${handleId}' (tool call ${parserOutput.hasToolCall ? 'found' : 'not found'}), marking ${downstreamNodes.size} downstream nodes`,
|
|
899
|
-
data: { handleId, targetNodeId, skippedCount: downstreamNodes.size },
|
|
900
|
-
}, options);
|
|
901
876
|
}
|
|
877
|
+
addTraceEntry(trace, {
|
|
878
|
+
type: 'debug_step',
|
|
879
|
+
nodeId,
|
|
880
|
+
message: `Skipping branch '${handleId}' (tool call ${parserOutput.hasToolCall ? 'found' : 'not found'}), marking ${downstreamNodes.size} downstream nodes`,
|
|
881
|
+
data: { handleId, targetNodeId, skippedCount: downstreamNodes.size },
|
|
882
|
+
}, options);
|
|
902
883
|
}
|
|
903
|
-
addTraceEntry(trace, {
|
|
904
|
-
type: 'expression_eval',
|
|
905
|
-
nodeId,
|
|
906
|
-
nodeName: node.data.label,
|
|
907
|
-
message: `Tool call parser: ${parserOutput.hasToolCall ? `found '${parserOutput.toolName}'` : 'no tool call'} -> ${selectedHandle}`,
|
|
908
|
-
data: { hasToolCall: parserOutput.hasToolCall, toolName: parserOutput.toolName, selectedHandle, selectedTarget },
|
|
909
|
-
}, options);
|
|
910
884
|
}
|
|
911
|
-
|
|
885
|
+
addTraceEntry(trace, {
|
|
886
|
+
type: 'expression_eval',
|
|
887
|
+
nodeId,
|
|
888
|
+
nodeName: node.data.label,
|
|
889
|
+
message: `Tool call parser: ${parserOutput.hasToolCall ? `found '${parserOutput.toolName}'` : 'no tool call'} -> ${selectedHandle}`,
|
|
890
|
+
data: { hasToolCall: parserOutput.hasToolCall, toolName: parserOutput.toolName, selectedHandle, selectedTarget },
|
|
891
|
+
}, options);
|
|
912
892
|
}
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
const
|
|
920
|
-
const
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
927
|
-
for (const skipNodeId of downstreamNodes) {
|
|
928
|
-
if (!mergeNodes.has(skipNodeId)) {
|
|
929
|
-
skippedNodes.add(skipNodeId);
|
|
930
|
-
}
|
|
893
|
+
}
|
|
894
|
+
else if (node.type === 'chat-agent') {
|
|
895
|
+
const chatAgentOutput = output;
|
|
896
|
+
const chatAgentInfo = branchingTargetMap.get(nodeId);
|
|
897
|
+
if (chatAgentInfo && chatAgentOutput?.rejected !== undefined) {
|
|
898
|
+
const selectedHandle = chatAgentOutput.rejected ? 'rejected' : 'output';
|
|
899
|
+
const mergeNodes = findMergeNodesDownstream(nodeId, workflowFile);
|
|
900
|
+
for (const [handleId, targetNodeId] of chatAgentInfo.edgeTargets) {
|
|
901
|
+
if (handleId !== selectedHandle) {
|
|
902
|
+
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
903
|
+
for (const skipNodeId of downstreamNodes) {
|
|
904
|
+
if (!mergeNodes.has(skipNodeId)) {
|
|
905
|
+
skippedNodes.add(skipNodeId);
|
|
931
906
|
}
|
|
932
907
|
}
|
|
933
908
|
}
|
|
934
909
|
}
|
|
935
|
-
break;
|
|
936
910
|
}
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
if (handleId !== selectedHandle) {
|
|
952
|
-
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
953
|
-
for (const skipNodeId of downstreamNodes) {
|
|
954
|
-
if (!mergeNodes.has(skipNodeId)) {
|
|
955
|
-
skippedNodes.add(skipNodeId);
|
|
956
|
-
}
|
|
911
|
+
}
|
|
912
|
+
else if (node.type === 'guardrail') {
|
|
913
|
+
const guardrailOutput = output;
|
|
914
|
+
const guardrailInfo = branchingTargetMap.get(nodeId);
|
|
915
|
+
if (guardrailInfo) {
|
|
916
|
+
const selectedHandle = guardrailOutput.rejected ? 'rejected' : 'output';
|
|
917
|
+
const selectedTarget = guardrailInfo.edgeTargets.get(selectedHandle);
|
|
918
|
+
const mergeNodes = findMergeNodesDownstream(nodeId, workflowFile);
|
|
919
|
+
for (const [handleId, targetNodeId] of guardrailInfo.edgeTargets) {
|
|
920
|
+
if (handleId !== selectedHandle) {
|
|
921
|
+
const downstreamNodes = getDownstreamNodes(targetNodeId, workflowFile, mergeNodes);
|
|
922
|
+
for (const skipNodeId of downstreamNodes) {
|
|
923
|
+
if (!mergeNodes.has(skipNodeId)) {
|
|
924
|
+
skippedNodes.add(skipNodeId);
|
|
957
925
|
}
|
|
958
|
-
addTraceEntry(trace, {
|
|
959
|
-
type: 'debug_step',
|
|
960
|
-
nodeId,
|
|
961
|
-
message: `Skipping branch '${handleId}' (input ${guardrailOutput.rejected ? 'rejected' : 'passed'}), marking ${downstreamNodes.size} downstream nodes`,
|
|
962
|
-
data: { handleId, targetNodeId, skippedCount: downstreamNodes.size },
|
|
963
|
-
}, options);
|
|
964
926
|
}
|
|
927
|
+
addTraceEntry(trace, {
|
|
928
|
+
type: 'debug_step',
|
|
929
|
+
nodeId,
|
|
930
|
+
message: `Skipping branch '${handleId}' (input ${guardrailOutput.rejected ? 'rejected' : 'passed'}), marking ${downstreamNodes.size} downstream nodes`,
|
|
931
|
+
data: { handleId, targetNodeId, skippedCount: downstreamNodes.size },
|
|
932
|
+
}, options);
|
|
965
933
|
}
|
|
966
|
-
addTraceEntry(trace, {
|
|
967
|
-
type: 'expression_eval',
|
|
968
|
-
nodeId,
|
|
969
|
-
nodeName: node.data.label,
|
|
970
|
-
message: `Guardrail: input ${guardrailOutput.rejected ? 'rejected' : 'passed'}${guardrailOutput.score !== undefined ? ` (score: ${guardrailOutput.score})` : ''} -> ${selectedHandle}`,
|
|
971
|
-
data: { rejected: guardrailOutput.rejected, score: guardrailOutput.score, selectedHandle, selectedTarget },
|
|
972
|
-
}, options);
|
|
973
934
|
}
|
|
974
|
-
break;
|
|
975
|
-
}
|
|
976
|
-
case 'command':
|
|
977
|
-
output = await executeCommandNode(node, context, options, trace);
|
|
978
|
-
break;
|
|
979
|
-
case 'web-search':
|
|
980
|
-
output = await executeWebSearchNode(node, context, options, trace);
|
|
981
|
-
break;
|
|
982
|
-
case 'claude-code':
|
|
983
|
-
// Claude Code node - requires Electron for SSH support
|
|
984
935
|
addTraceEntry(trace, {
|
|
985
|
-
type: '
|
|
936
|
+
type: 'expression_eval',
|
|
986
937
|
nodeId,
|
|
987
938
|
nodeName: node.data.label,
|
|
988
|
-
message: '
|
|
939
|
+
message: `Guardrail: input ${guardrailOutput.rejected ? 'rejected' : 'passed'}${guardrailOutput.score !== undefined ? ` (score: ${guardrailOutput.score})` : ''} -> ${selectedHandle}`,
|
|
940
|
+
data: { rejected: guardrailOutput.rejected, score: guardrailOutput.score, selectedHandle, selectedTarget },
|
|
989
941
|
}, options);
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
942
|
+
}
|
|
943
|
+
}
|
|
944
|
+
else if (node.type === 'merge') {
|
|
945
|
+
// Check if merge is waiting for more inputs
|
|
946
|
+
const mergeOut = output;
|
|
947
|
+
if (mergeOut && typeof mergeOut === 'object' && mergeOut.waiting) {
|
|
948
|
+
const missingInputs = mergeOut.missingInputs || [];
|
|
994
949
|
addTraceEntry(trace, {
|
|
995
950
|
type: 'debug_step',
|
|
996
951
|
nodeId,
|
|
997
952
|
nodeName: node.data.label,
|
|
998
|
-
message:
|
|
953
|
+
message: `Merge node waiting for inputs: ${missingInputs.join(', ')}`,
|
|
954
|
+
data: { missingInputs },
|
|
999
955
|
}, options);
|
|
1000
|
-
output =
|
|
1001
|
-
|
|
1002
|
-
case 'mcp-tool':
|
|
1003
|
-
output = await executeMcpToolNode(node, context, options, trace);
|
|
1004
|
-
break;
|
|
1005
|
-
case 'output':
|
|
1006
|
-
// Output node just passes through the previous output
|
|
1007
|
-
output = previousOutput;
|
|
1008
|
-
break;
|
|
1009
|
-
case 'trigger':
|
|
1010
|
-
// Trigger node outputs the workflow parameters so downstream nodes can access them
|
|
1011
|
-
output = Object.keys(params).length > 0 ? params : previousOutput;
|
|
1012
|
-
break;
|
|
1013
|
-
default:
|
|
1014
|
-
output = previousOutput;
|
|
956
|
+
output = {};
|
|
957
|
+
}
|
|
1015
958
|
}
|
|
1016
959
|
const nodeDuration = Date.now() - nodeStartTime;
|
|
1017
960
|
// Store output
|
|
@@ -1610,6 +1553,82 @@ function executeConditionNode(node, context) {
|
|
|
1610
1553
|
// Return default
|
|
1611
1554
|
return { branch: 'default', target: data.default || '' };
|
|
1612
1555
|
}
|
|
1556
|
+
/**
|
|
1557
|
+
* Dispatch a single node for execution based on its type.
|
|
1558
|
+
* This is the single source of truth for which function handles which node type.
|
|
1559
|
+
* Both the main execution loop and executeNodeSubset call this to avoid
|
|
1560
|
+
* duplicating the node-type switch statement.
|
|
1561
|
+
*
|
|
1562
|
+
* NOTE: This function handles pure execution only. Branching logic (condition,
|
|
1563
|
+
* tool-call-parser, guardrail, chat-agent) that manipulates skippedNodes and
|
|
1564
|
+
* branchingTargetMap is handled by the caller (main loop) after dispatch.
|
|
1565
|
+
*/
|
|
1566
|
+
async function dispatchNode(node, context, options, state, workflowFile, trace, memoryBackend, extra) {
|
|
1567
|
+
const previousOutput = context.previous_output;
|
|
1568
|
+
switch (node.type) {
|
|
1569
|
+
case 'prompt':
|
|
1570
|
+
return executePromptNode(node, context, options, trace, state, workflowFile);
|
|
1571
|
+
case 'condition':
|
|
1572
|
+
return executeConditionNode(node, context);
|
|
1573
|
+
case 'loop':
|
|
1574
|
+
return executeLoopNode(node, context, options, state, workflowFile, trace, memoryBackend);
|
|
1575
|
+
case 'parallel':
|
|
1576
|
+
return executeParallelNode(node, context, options, state, workflowFile, trace, memoryBackend, extra?.skippedNodes);
|
|
1577
|
+
case 'merge':
|
|
1578
|
+
return executeMergeNode(node, context, workflowFile, extra?.skippedNodes);
|
|
1579
|
+
case 'transformer':
|
|
1580
|
+
return executeTransformerNode(node, context);
|
|
1581
|
+
case 'code':
|
|
1582
|
+
return executeCodeNode(node, context);
|
|
1583
|
+
case 'memory':
|
|
1584
|
+
return executeMemoryNode(node, context, state, memoryBackend);
|
|
1585
|
+
case 'callback':
|
|
1586
|
+
case 'checkpoint':
|
|
1587
|
+
return executeCallbackNode(node, context, options, state, workflowFile, extra?.executionOrder || [], trace);
|
|
1588
|
+
case 'user-input':
|
|
1589
|
+
return executeUserInputNode(node, context, options, trace);
|
|
1590
|
+
case 'tool':
|
|
1591
|
+
return executeToolNode(node, context, options, trace);
|
|
1592
|
+
case 'tool-call-parser':
|
|
1593
|
+
return executeToolCallParserNode(node, context, trace, options);
|
|
1594
|
+
case 'agent':
|
|
1595
|
+
return executeAgentNode(node, context, options, trace, state, workflowFile, memoryBackend);
|
|
1596
|
+
case 'chat-agent':
|
|
1597
|
+
return executeChatAgentNode(node, context, options, trace, state, workflowFile, extra?.branchingTargetMap || new Map(), extra?.skippedNodes || new Set(), memoryBackend);
|
|
1598
|
+
case 'guardrail':
|
|
1599
|
+
return executeGuardrailNode(node, context, options, trace, workflowFile);
|
|
1600
|
+
case 'command':
|
|
1601
|
+
return executeCommandNode(node, context, options, trace);
|
|
1602
|
+
case 'web-search':
|
|
1603
|
+
return executeWebSearchNode(node, context, options, trace);
|
|
1604
|
+
case 'claude-code':
|
|
1605
|
+
addTraceEntry(trace, {
|
|
1606
|
+
type: 'debug_step',
|
|
1607
|
+
nodeId: node.id,
|
|
1608
|
+
nodeName: node.data.label,
|
|
1609
|
+
message: 'Claude Code node execution not yet implemented (requires Electron SSH)',
|
|
1610
|
+
}, options);
|
|
1611
|
+
return previousOutput;
|
|
1612
|
+
case 'workflow':
|
|
1613
|
+
addTraceEntry(trace, {
|
|
1614
|
+
type: 'debug_step',
|
|
1615
|
+
nodeId: node.id,
|
|
1616
|
+
nodeName: node.data.label,
|
|
1617
|
+
message: 'Sub-workflow node execution not yet implemented',
|
|
1618
|
+
}, options);
|
|
1619
|
+
return previousOutput;
|
|
1620
|
+
case 'mcp-tool':
|
|
1621
|
+
return executeMcpToolNode(node, context, options, trace);
|
|
1622
|
+
case 'output':
|
|
1623
|
+
return previousOutput;
|
|
1624
|
+
case 'trigger':
|
|
1625
|
+
return Object.keys(context.workflow).length > 0 ? context.workflow : previousOutput;
|
|
1626
|
+
case 'database-query':
|
|
1627
|
+
return executeDatabaseQueryNode(node, context, options, trace);
|
|
1628
|
+
default:
|
|
1629
|
+
return previousOutput;
|
|
1630
|
+
}
|
|
1631
|
+
}
|
|
1613
1632
|
/**
|
|
1614
1633
|
* Execute a subset of nodes (used by loop and parallel nodes)
|
|
1615
1634
|
* Returns the output of the last executed node
|
|
@@ -1674,60 +1693,8 @@ async function executeNodeSubset(nodeIds, workflowFile, context, options, state,
|
|
|
1674
1693
|
workflow: context.workflow,
|
|
1675
1694
|
previous_output: lastOutput,
|
|
1676
1695
|
};
|
|
1677
|
-
//
|
|
1678
|
-
|
|
1679
|
-
switch (node.type) {
|
|
1680
|
-
case 'prompt':
|
|
1681
|
-
output = await executePromptNode(node, nodeContext, options, trace, state, workflowFile);
|
|
1682
|
-
break;
|
|
1683
|
-
case 'condition':
|
|
1684
|
-
output = executeConditionNode(node, nodeContext);
|
|
1685
|
-
break;
|
|
1686
|
-
case 'transformer':
|
|
1687
|
-
output = executeTransformerNode(node, nodeContext);
|
|
1688
|
-
break;
|
|
1689
|
-
case 'memory':
|
|
1690
|
-
output = await executeMemoryNode(node, nodeContext, state, memoryBackend);
|
|
1691
|
-
break;
|
|
1692
|
-
case 'merge':
|
|
1693
|
-
output = executeMergeNode(node, nodeContext);
|
|
1694
|
-
break;
|
|
1695
|
-
case 'callback':
|
|
1696
|
-
case 'checkpoint':
|
|
1697
|
-
// Execute callback node with full checkpoint support (pause, report, etc.)
|
|
1698
|
-
// Need to pass execution order for next node info - use nodeIds as the subset order
|
|
1699
|
-
output = await executeCallbackNode(node, nodeContext, options, state, workflowFile, nodeIds, trace);
|
|
1700
|
-
break;
|
|
1701
|
-
case 'user-input':
|
|
1702
|
-
output = await executeUserInputNode(node, nodeContext, options, trace);
|
|
1703
|
-
break;
|
|
1704
|
-
case 'command':
|
|
1705
|
-
output = await executeCommandNode(node, nodeContext, options, trace);
|
|
1706
|
-
break;
|
|
1707
|
-
case 'web-search':
|
|
1708
|
-
output = await executeWebSearchNode(node, nodeContext, options, trace);
|
|
1709
|
-
break;
|
|
1710
|
-
case 'mcp-tool':
|
|
1711
|
-
output = await executeMcpToolNode(node, nodeContext, options, trace);
|
|
1712
|
-
break;
|
|
1713
|
-
case 'agent':
|
|
1714
|
-
output = await executeAgentNode(node, nodeContext, options, trace, state, workflowFile, memoryBackend);
|
|
1715
|
-
break;
|
|
1716
|
-
case 'chat-agent':
|
|
1717
|
-
// Chat Agent uses a simplified context since branching is handled internally
|
|
1718
|
-
output = await executeChatAgentNode(node, nodeContext, options, trace, state, workflowFile, new Map(), // branchingTargetMap - not used in parallel execution
|
|
1719
|
-
new Set(), // skippedNodes - not used in parallel execution
|
|
1720
|
-
memoryBackend);
|
|
1721
|
-
break;
|
|
1722
|
-
case 'guardrail':
|
|
1723
|
-
output = await executeGuardrailNode(node, nodeContext, options, trace, workflowFile);
|
|
1724
|
-
break;
|
|
1725
|
-
case 'output':
|
|
1726
|
-
output = lastOutput;
|
|
1727
|
-
break;
|
|
1728
|
-
default:
|
|
1729
|
-
output = lastOutput;
|
|
1730
|
-
}
|
|
1696
|
+
// Dispatch via shared single-source-of-truth switch
|
|
1697
|
+
const output = await dispatchNode(node, nodeContext, options, state, workflowFile, trace, memoryBackend, { executionOrder: nodeIds });
|
|
1731
1698
|
const nodeDuration = Date.now() - nodeStartTime;
|
|
1732
1699
|
// Store output
|
|
1733
1700
|
state.nodeOutputs[nodeId] = output;
|
|
@@ -1884,8 +1851,133 @@ async function executeLoopNode(node, context, options, state, workflowFile, trac
|
|
|
1884
1851
|
/**
|
|
1885
1852
|
* Execute a parallel node - executes all branches concurrently
|
|
1886
1853
|
*/
|
|
1887
|
-
async function executeParallelNode(node, context, options, state, workflowFile, trace, memoryBackend) {
|
|
1854
|
+
async function executeParallelNode(node, context, options, state, workflowFile, trace, memoryBackend, skippedNodes) {
|
|
1888
1855
|
const data = node.data;
|
|
1856
|
+
// ── Fork mode: edge-based parallelism ──
|
|
1857
|
+
// In fork mode, the parallel node is NOT a container. It has fork-0, fork-1, etc.
|
|
1858
|
+
// output handles that connect to downstream nodes. Each fork branch is traced
|
|
1859
|
+
// forward through edges until reaching a merge node (or dead end), then all
|
|
1860
|
+
// branches run concurrently via Promise.all.
|
|
1861
|
+
if (data.mode === 'fork') {
|
|
1862
|
+
// Find edges from this node's fork handles
|
|
1863
|
+
const forkEdges = workflowFile.edges.filter(edge => edge.source === node.id && edge.sourceHandle?.startsWith('fork-'));
|
|
1864
|
+
if (forkEdges.length === 0) {
|
|
1865
|
+
addTraceEntry(trace, {
|
|
1866
|
+
type: 'debug_step',
|
|
1867
|
+
nodeId: node.id,
|
|
1868
|
+
nodeName: node.data.label,
|
|
1869
|
+
nodeType: 'parallel',
|
|
1870
|
+
message: 'Fork mode: no fork edges found, passing through input',
|
|
1871
|
+
}, options);
|
|
1872
|
+
return context.previous_output;
|
|
1873
|
+
}
|
|
1874
|
+
// For each fork edge, trace the branch chain forward until a merge node or dead end
|
|
1875
|
+
const forkBranches = [];
|
|
1876
|
+
// Collect all merge node IDs so we know where to stop tracing
|
|
1877
|
+
const mergeNodeIds = new Set(workflowFile.nodes.filter(n => n.type === 'merge').map(n => n.id));
|
|
1878
|
+
for (const forkEdge of forkEdges) {
|
|
1879
|
+
const branchNodeIds = [];
|
|
1880
|
+
const visited = new Set();
|
|
1881
|
+
const queue = [forkEdge.target];
|
|
1882
|
+
while (queue.length > 0) {
|
|
1883
|
+
const currentId = queue.shift();
|
|
1884
|
+
if (visited.has(currentId) || mergeNodeIds.has(currentId))
|
|
1885
|
+
continue;
|
|
1886
|
+
visited.add(currentId);
|
|
1887
|
+
branchNodeIds.push(currentId);
|
|
1888
|
+
// Follow outgoing execution edges from this node
|
|
1889
|
+
for (const edge of workflowFile.edges) {
|
|
1890
|
+
if (edge.source !== currentId)
|
|
1891
|
+
continue;
|
|
1892
|
+
// Skip event-based and back-edges
|
|
1893
|
+
if (edge.sourceHandle === 'loop-end' || edge.sourceHandle === 'parallel-end')
|
|
1894
|
+
continue;
|
|
1895
|
+
if (edge.targetHandle?.startsWith('fork-'))
|
|
1896
|
+
continue;
|
|
1897
|
+
const eventHandles = ['onError', 'onCheckpoint', 'onProgress', 'toolResult'];
|
|
1898
|
+
if (edge.sourceHandle && eventHandles.includes(edge.sourceHandle))
|
|
1899
|
+
continue;
|
|
1900
|
+
// Skip edges going to merge input handles (that's the convergence point)
|
|
1901
|
+
if (edge.targetHandle?.startsWith('input-'))
|
|
1902
|
+
continue;
|
|
1903
|
+
if (!visited.has(edge.target) && !mergeNodeIds.has(edge.target)) {
|
|
1904
|
+
queue.push(edge.target);
|
|
1905
|
+
}
|
|
1906
|
+
}
|
|
1907
|
+
}
|
|
1908
|
+
const handleIndex = forkEdge.sourceHandle?.replace('fork-', '') || '0';
|
|
1909
|
+
const label = data.forkLabels?.[parseInt(handleIndex, 10)] || `Branch ${parseInt(handleIndex, 10) + 1}`;
|
|
1910
|
+
forkBranches.push({ handle: forkEdge.sourceHandle || `fork-${handleIndex}`, label, nodeIds: branchNodeIds });
|
|
1911
|
+
}
|
|
1912
|
+
addTraceEntry(trace, {
|
|
1913
|
+
type: 'debug_step',
|
|
1914
|
+
nodeId: node.id,
|
|
1915
|
+
nodeName: node.data.label,
|
|
1916
|
+
nodeType: 'parallel',
|
|
1917
|
+
message: `Fork mode: starting ${forkBranches.length} parallel branches (waitFor: ${data.waitFor})`,
|
|
1918
|
+
data: {
|
|
1919
|
+
branches: forkBranches.map(b => ({ handle: b.handle, label: b.label, nodeCount: b.nodeIds.length, nodes: b.nodeIds }))
|
|
1920
|
+
},
|
|
1921
|
+
}, options);
|
|
1922
|
+
// Mark all fork branch nodes so the main loop skips them —
|
|
1923
|
+
// they will be executed here concurrently instead of sequentially.
|
|
1924
|
+
if (skippedNodes) {
|
|
1925
|
+
for (const branch of forkBranches) {
|
|
1926
|
+
for (const nid of branch.nodeIds) {
|
|
1927
|
+
skippedNodes.add(nid);
|
|
1928
|
+
}
|
|
1929
|
+
}
|
|
1930
|
+
}
|
|
1931
|
+
// Run all branches concurrently
|
|
1932
|
+
const branchPromises = forkBranches.map(async (branch) => {
|
|
1933
|
+
try {
|
|
1934
|
+
const branchOutput = await executeNodeSubset(branch.nodeIds, workflowFile, { ...context, previous_output: context.previous_output }, options, state, trace, memoryBackend);
|
|
1935
|
+
addTraceEntry(trace, {
|
|
1936
|
+
type: 'debug_step',
|
|
1937
|
+
nodeId: node.id,
|
|
1938
|
+
message: `Fork branch '${branch.label}' completed`,
|
|
1939
|
+
data: { handle: branch.handle, output: branchOutput },
|
|
1940
|
+
}, options);
|
|
1941
|
+
return { branchId: branch.label, result: branchOutput, success: true };
|
|
1942
|
+
}
|
|
1943
|
+
catch (error) {
|
|
1944
|
+
const msg = error instanceof Error ? error.message : String(error);
|
|
1945
|
+
addTraceEntry(trace, {
|
|
1946
|
+
type: 'node_error',
|
|
1947
|
+
nodeId: node.id,
|
|
1948
|
+
message: `Fork branch '${branch.label}' failed: ${msg}`,
|
|
1949
|
+
data: { handle: branch.handle, error: msg },
|
|
1950
|
+
}, options);
|
|
1951
|
+
return { branchId: branch.label, result: null, success: false, error: msg };
|
|
1952
|
+
}
|
|
1953
|
+
});
|
|
1954
|
+
// Wait based on strategy
|
|
1955
|
+
let results;
|
|
1956
|
+
if (data.waitFor === 'race') {
|
|
1957
|
+
results = [await Promise.race(branchPromises)];
|
|
1958
|
+
}
|
|
1959
|
+
else if (data.waitFor === 'any') {
|
|
1960
|
+
const all = await Promise.all(branchPromises);
|
|
1961
|
+
const first = all.find(r => r.success);
|
|
1962
|
+
results = first ? [first] : all;
|
|
1963
|
+
}
|
|
1964
|
+
else {
|
|
1965
|
+
results = await Promise.all(branchPromises);
|
|
1966
|
+
}
|
|
1967
|
+
// Merge based on strategy
|
|
1968
|
+
if (data.mergeStrategy === 'object') {
|
|
1969
|
+
const merged = {};
|
|
1970
|
+
for (const r of results)
|
|
1971
|
+
merged[r.branchId] = r.result;
|
|
1972
|
+
return merged;
|
|
1973
|
+
}
|
|
1974
|
+
if (data.mergeStrategy === 'first') {
|
|
1975
|
+
const first = results.find(r => r.success);
|
|
1976
|
+
return first ? { result: first.result } : { error: 'All branches failed' };
|
|
1977
|
+
}
|
|
1978
|
+
return results.map(r => r.result);
|
|
1979
|
+
}
|
|
1980
|
+
// ── Broadcast mode: container-based parallelism ──
|
|
1889
1981
|
// Get child nodes from parentId relationship
|
|
1890
1982
|
// Each child node becomes its own parallel "branch"
|
|
1891
1983
|
const childNodeIds = getChildNodeIds(node.id, workflowFile, data);
|
|
@@ -2110,49 +2202,272 @@ function executeMergeNode(node, context, workflowFile, skippedNodes) {
|
|
|
2110
2202
|
return merged;
|
|
2111
2203
|
}
|
|
2112
2204
|
/**
|
|
2113
|
-
* Execute a transformer node - applies
|
|
2205
|
+
* Execute a transformer node - applies data transformation via template or expression
|
|
2206
|
+
*
|
|
2207
|
+
* Modes:
|
|
2208
|
+
* - template: JSON template with {{ variable }} interpolation (default)
|
|
2209
|
+
* - expression: JavaScript expression evaluated via Function constructor
|
|
2210
|
+
* - jq: Reserved for future JQ-style query support
|
|
2114
2211
|
*/
|
|
2115
2212
|
function executeTransformerNode(node, context) {
    const config = node.data;
    const transformMode = config.mode || 'template';
    try {
        // Expression mode evaluates JavaScript; every other mode falls through to
        // the template path, which also covers the legacy `transform` field.
        return transformMode === 'expression'
            ? executeTransformerExpression(config, context)
            : executeTransformerTemplate(config, context);
    }
    catch (error) {
        console.warn(`[TransformerNode] Transform error (mode=${transformMode}):`, error);
        // passthroughOnError lets the workflow continue with the upstream output
        // instead of aborting on a bad transform.
        if (!config.passthroughOnError) {
            throw error;
        }
        return context.previous_output;
    }
}
|
|
2230
|
+
/**
|
|
2231
|
+
* Execute transformer in template mode — JSON template with {{ }} variable interpolation
|
|
2232
|
+
*/
|
|
2233
|
+
function executeTransformerTemplate(data, context) {
    // Prefer the newer 'template' field; fall back to the legacy 'transform' field.
    const template = data.template || data.transform;
    if (!template) {
        return context.previous_output;
    }
    // Render a single evaluated value as a JSON-compatible fragment.
    const toJsonFragment = (value) => {
        if (value === undefined || value === null) {
            return 'null';
        }
        if (typeof value === 'string') {
            // Escape for a JSON string context, then strip the surrounding quotes.
            return JSON.stringify(value).slice(1, -1);
        }
        if (typeof value === 'object') {
            return JSON.stringify(value);
        }
        return String(value);
    };
    // Substitute every {{ ... }} expression with its evaluated value.
    const rendered = template.replace(/\{\{([^}]+)\}\}/g, (match) => toJsonFragment(evaluateExpression(match, context)));
    // Prefer structured output: parse as JSON when possible, otherwise return the raw string.
    try {
        return JSON.parse(rendered);
    }
    catch {
        return rendered;
    }
}
|
|
2265
|
+
/**
|
|
2266
|
+
* Execute transformer in expression mode — JavaScript expression via Function constructor
|
|
2267
|
+
*
|
|
2268
|
+
* The expression has access to:
|
|
2269
|
+
* - previous_output / input: output from the connected upstream node
|
|
2270
|
+
* - Custom inputVariable name (defaults to 'input')
|
|
2271
|
+
* - workflow: workflow parameters
|
|
2272
|
+
* - All node outputs by node ID
|
|
2273
|
+
*/
|
|
2274
|
+
function executeTransformerExpression(data, context) {
    const expression = data.expression;
    if (!expression) {
        // No expression configured — pass the upstream output through unchanged.
        return context.previous_output;
    }
    const inputVarName = data.inputVariable || 'input';
    // Build the execution context with all available variables.
    // Node output keys (e.g. "web-search-1770610749352") contain hyphens which are not
    // valid JS identifiers. We sanitize them (hyphens -> underscores) and, when the
    // sanitized key starts with a digit (e.g. "123-node" -> "123_node"), prefix an
    // underscore — Function parameter names may not begin with a digit, and an
    // invalid name would make the Function constructor throw a SyntaxError.
    const execContext = {};
    for (const [key, value] of Object.entries(context.nodeOutputs)) {
        let sanitized = key.replace(/[^a-zA-Z0-9_$]/g, '_');
        if (/^[0-9]/.test(sanitized)) {
            sanitized = '_' + sanitized;
        }
        execContext[sanitized] = value;
    }
    // Add workflow variables (these should already be valid identifiers)
    for (const [key, value] of Object.entries(context.variables)) {
        execContext[key] = value;
    }
    execContext.workflow = context.workflow;
    execContext.previous_output = context.previous_output;
    execContext.input = context.previous_output;
    execContext.previous_step = context.previous_output;
    // Also expose the upstream output under the custom inputVariable name.
    if (inputVarName !== 'input' && inputVarName !== 'previous_output') {
        execContext[inputVarName] = context.previous_output;
    }
    // Build parameter names and values for the Function constructor.
    const paramNames = Object.keys(execContext);
    const paramValues = Object.values(execContext);
    // The expression is treated as a function body, so multi-line code with an
    // explicit `return` is supported. This is deliberate dynamic execution of
    // user-authored workflow expressions, not of untrusted external input.
    // eslint-disable-next-line no-new-func
    const fn = new Function(...paramNames, expression);
    return fn(...paramValues);
}
|
|
2310
|
+
/**
|
|
2311
|
+
* Execute code in a sandboxed context.
|
|
2312
|
+
*
|
|
2313
|
+
* - TS/JS: new Function() with context variables injected as parameters
|
|
2314
|
+
* - Python: Writes a temp .py file, passes input as JSON via stdin, reads JSON output
|
|
2315
|
+
* - C#: Writes a temp .csx file for dotnet-script, same stdin/stdout JSON pattern
|
|
2316
|
+
*
|
|
2317
|
+
* For Python/C#, the previous node's output is serialized as JSON and passed via
|
|
2318
|
+
* stdin. The script must print its result as JSON to stdout.
|
|
2319
|
+
*/
|
|
2320
|
+
function executeInlineCode(code, language, inputVarName, timeoutMs, nodeLabel, nodeId, context, executionContext = 'isolated') {
    // No code configured — pass the upstream output through unchanged.
    if (!code) {
        return context.previous_output;
    }
    // TS/JS: execute in-process, via vm (isolated) or Function constructor (main)
    if (language === 'typescript' || language === 'javascript') {
        const execVars = {};
        // Sanitize node output keys — node IDs contain hyphens (e.g. "web-search-...")
        // which are not valid JS identifiers for Function constructor params
        for (const [key, value] of Object.entries(context.nodeOutputs)) {
            execVars[key.replace(/[^a-zA-Z0-9_$]/g, '_')] = value;
        }
        // Workflow variables are exposed under their own names.
        for (const [key, value] of Object.entries(context.variables)) {
            execVars[key] = value;
        }
        // Standard bindings: the upstream output is reachable as previous_output,
        // input, and previous_step.
        execVars.workflow = context.workflow;
        execVars.previous_output = context.previous_output;
        execVars.input = context.previous_output;
        execVars.previous_step = context.previous_output;
        // Also expose the upstream output under a custom input variable name, if set.
        if (inputVarName !== 'input' && inputVarName !== 'previous_output') {
            execVars[inputVarName] = context.previous_output;
        }
        if (executionContext === 'isolated') {
            // Sandboxed execution via Node.js vm module — no access to require, process, etc.
            // Only the globals listed below are available to user code.
            const sandbox = {
                ...execVars,
                console: { log: console.log, warn: console.warn, error: console.error },
                JSON,
                Math,
                Date,
                Array,
                Object,
                String,
                Number,
                Boolean,
                RegExp,
                Map,
                Set,
                Promise,
                parseInt,
                parseFloat,
                isNaN,
                isFinite,
                encodeURIComponent,
                decodeURIComponent,
                encodeURI,
                decodeURI,
            };
            const vmContext = vm_1.default.createContext(sandbox);
            // Wrap in a plain (synchronous) IIFE so user code can use return statements.
            const wrapped = `(function() {\n${code}\n})()`;
            // Node label is used as the script filename so stack traces name the node.
            const script = new vm_1.default.Script(wrapped, { filename: `${nodeLabel}.js` });
            // NOTE(review): the vm timeout only bounds synchronous execution; code that
            // returns a Promise is not bounded by it — confirm this is acceptable.
            return script.runInContext(vmContext, { timeout: timeoutMs });
        }
        // Main context — full access via Function constructor (the code body may
        // use `return`; it runs with the host's globals, unsandboxed).
        const paramNames = Object.keys(execVars);
        const paramValues = Object.values(execVars);
        // eslint-disable-next-line no-new-func
        const fn = new Function(...paramNames, code);
        return fn(...paramValues);
    }
    // Python: pass code via -c flag, input data via stdin as JSON
    if (language === 'python') {
        const inputJson = JSON.stringify(context.previous_output ?? null);
        // Wrap user code in a function so 'return' works, pipe input via stdin.
        // The workflow parameters are embedded as a double-encoded JSON literal.
        const wrapper = [
            'import sys, json',
            `${inputVarName} = json.loads(sys.stdin.read())`,
            'workflow = json.loads(' + JSON.stringify(JSON.stringify(context.workflow)) + ')',
            'def __user_fn__():',
            ...code.split('\n').map(line => ' ' + line),
            '__result__ = __user_fn__()',
            'if __result__ is not None:',
            ' print(json.dumps(__result__))',
        ].join('\n');
        // execFileSync bypasses shell — args passed directly to process, no escaping needed
        const stdout = (0, child_process_1.execFileSync)('python', ['-c', wrapper], {
            input: inputJson,
            encoding: 'utf-8',
            timeout: timeoutMs,
            windowsHide: true,
        });
        const trimmed = stdout.trim();
        // No output means the script returned None — fall back to the upstream output.
        if (!trimmed)
            return context.previous_output;
        // Prefer structured output; if stdout is not valid JSON, return it as a string.
        try {
            return JSON.parse(trimmed);
        }
        catch {
            return trimmed;
        }
    }
    // C#: write temp .cs file, run via `dotnet <file>.cs` (.NET 10 file-based programs)
    // Uses System.Text.Json (built-in, no external NuGet needed) for JSON serialization
    if (language === 'csharp') {
        const inputJson = JSON.stringify(context.previous_output ?? null);
        // Prelude reads the input JSON from stdin and embeds the workflow parameters,
        // then appends the user code verbatim.
        const wrapper = [
            'using System;',
            'using System.IO;',
            'using System.Text.Json;',
            'using System.Text.Json.Nodes;',
            '',
            `var ${inputVarName} = JsonNode.Parse(Console.In.ReadToEnd());`,
            'var workflow = JsonNode.Parse(' + JSON.stringify(JSON.stringify(context.workflow)) + ');',
            '',
            code,
        ].join('\n');
        const tempDir = (0, fs_1.mkdtempSync)((0, path_1.join)((0, os_1.tmpdir)(), 'prompd-code-'));
        const tempFile = (0, path_1.join)(tempDir, 'script.cs');
        try {
            (0, fs_1.writeFileSync)(tempFile, wrapper, 'utf-8');
            const stdout = (0, child_process_1.execFileSync)('dotnet', ['run', tempFile], {
                input: inputJson,
                encoding: 'utf-8',
                timeout: timeoutMs,
                windowsHide: true,
            });
            const trimmed = stdout.trim();
            // Empty stdout — pass the upstream output through.
            if (!trimmed)
                return context.previous_output;
            // Prefer structured output; non-JSON stdout is returned as a string.
            try {
                return JSON.parse(trimmed);
            }
            catch {
                return trimmed;
            }
        }
        finally {
            // NOTE(review): only the temp file is removed; the mkdtempSync directory
            // itself is left behind — consider rmSync(tempDir, { recursive: true }).
            try {
                (0, fs_1.unlinkSync)(tempFile);
            }
            catch { /* ignore cleanup errors */ }
        }
    }
    // Any other language value is a configuration error on the node.
    throw new Error(`Unsupported code language '${language}'.\n` +
        `Supported: typescript, javascript, python, csharp.\n` +
        `Node: "${nodeLabel}" (${nodeId})`);
}
|
|
2458
|
+
/**
|
|
2459
|
+
* Execute a code node — dispatches to executeInlineCode with CodeNodeData fields.
|
|
2460
|
+
*/
|
|
2461
|
+
function executeCodeNode(node, context) {
    // Read the code-node configuration, applying the documented defaults, then
    // delegate to the shared inline-code runner.
    const { data } = node;
    const language = data.language || 'javascript';
    const inputVariable = data.inputVariable || 'input';
    const timeout = data.timeoutMs ?? 30000;
    const sandboxMode = data.executionContext || 'isolated';
    return executeInlineCode(data.code, language, inputVariable, timeout, data.label, node.id, context, sandboxMode);
}
|
|
2465
|
+
/**
|
|
2466
|
+
* Execute code from a ToolNode with toolType 'code'.
|
|
2467
|
+
* Same pattern as executeCodeNode but reads from ToolNodeData's code fields.
|
|
2468
|
+
*/
|
|
2469
|
+
function executeToolCodeSnippet(data, node, context) {
    // Tool nodes keep their snippet under code*-prefixed fields; map those onto
    // the shared inline-code runner. Tool snippets always use a fixed 30s timeout.
    const snippet = data.codeSnippet || '';
    const lang = data.codeLanguage || 'javascript';
    const inputName = data.codeInputVariable || 'input';
    const sandboxMode = data.codeExecutionContext || 'isolated';
    return executeInlineCode(snippet, lang, inputName, 30000, node.data.label, node.id, context, sandboxMode);
}
|
|
2157
2472
|
/**
|
|
2158
2473
|
* Execute a memory node - KV store, conversation history, or cache operations
|
|
@@ -2849,6 +3164,111 @@ async function executeWebSearchNode(node, context, options, trace) {
|
|
|
2849
3164
|
};
|
|
2850
3165
|
}
|
|
2851
3166
|
}
|
|
3167
|
+
/**
|
|
3168
|
+
* Execute a database query node - delegates to IPC via onToolCall
|
|
3169
|
+
*
|
|
3170
|
+
* Resolves the connection from the workflow's connections array to determine
|
|
3171
|
+
* the database type, then sends a ToolCallRequest with databaseConfig.
|
|
3172
|
+
* The actual DB driver execution happens in the Electron main process.
|
|
3173
|
+
*/
|
|
3174
|
+
async function executeDatabaseQueryNode(node, context, options, trace) {
    const data = node.data;
    // Resolve a {{ }} template against the workflow context. The same logic was
    // previously repeated verbatim for query, parameters, and collection; it is
    // factored out here. Non-string results are coerced to strings because the
    // IPC payload expects string fields.
    const resolveTemplate = (raw) => {
        const text = raw || '';
        if (!text.includes('{{')) {
            return text;
        }
        const result = evaluateExpression(text, context);
        return typeof result === 'string' ? result : String(result);
    };
    const resolvedQuery = resolveTemplate(data.query);
    const resolvedParameters = resolveTemplate(data.parameters);
    // Collection only applies to document stores (e.g. MongoDB).
    const resolvedCollection = resolveTemplate(data.collection);
    addTraceEntry(trace, {
        type: 'debug_step',
        nodeId: node.id,
        nodeName: node.data.label,
        nodeType: 'database-query',
        message: `Executing ${data.queryType || 'select'} query`,
        data: {
            connectionId: data.connectionId,
            queryType: data.queryType,
            // Truncate long queries in the trace to keep entries readable.
            query: resolvedQuery.length > 100 ? resolvedQuery.slice(0, 100) + '...' : resolvedQuery,
            collection: resolvedCollection || undefined,
        },
    }, options);
    if (options.onToolCall) {
        try {
            // dbType is resolved by the IPC handler (main.js) from the connection config.
            // We do not hardcode it here — the connection's dbType is the source of truth.
            const toolCallRequest = {
                nodeId: node.id,
                toolName: 'database-query',
                toolType: 'database-query',
                parameters: {},
                databaseConfig: {
                    connectionId: data.connectionId,
                    queryType: data.queryType || 'select',
                    query: resolvedQuery,
                    parameters: resolvedParameters || undefined,
                    collection: resolvedCollection || undefined,
                    maxRows: data.maxRows ?? 1000,
                    timeoutMs: data.timeoutMs ?? 30000,
                },
            };
            const result = await options.onToolCall(toolCallRequest);
            if (!result.success) {
                throw new Error(result.error || 'Database query failed');
            }
            addTraceEntry(trace, {
                type: 'debug_step',
                nodeId: node.id,
                nodeName: node.data.label,
                nodeType: 'database-query',
                message: 'Database query completed successfully',
                data: {
                    rowCount: Array.isArray(result.result) ? result.result.length : 1,
                },
            }, options);
            return result.result;
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            addTraceEntry(trace, {
                type: 'node_error',
                nodeId: node.id,
                nodeName: node.data.label,
                nodeType: 'database-query',
                message: `Database query error: ${errorMessage}`,
                data: { error: errorMessage },
            }, options);
            throw error;
        }
    }
    else {
        // No IPC bridge available — record the situation and return a marker
        // object instead of failing the whole workflow.
        addTraceEntry(trace, {
            type: 'debug_step',
            nodeId: node.id,
            nodeName: node.data.label,
            nodeType: 'database-query',
            message: 'Database query requires onToolCall callback',
            data: { connectionId: data.connectionId, query: resolvedQuery },
        }, options);
        return {
            skipped: true,
            reason: 'onToolCall callback required for database query execution',
            connectionId: data.connectionId,
            queryType: data.queryType,
        };
    }
}
|
|
2852
3272
|
/**
|
|
2853
3273
|
* Execute an MCP tool node - calls external MCP server tools
|
|
2854
3274
|
*/
|
|
@@ -3041,6 +3461,39 @@ async function executeToolNode(node, context, options, trace) {
|
|
|
3041
3461
|
serverName: data.mcpServerName,
|
|
3042
3462
|
};
|
|
3043
3463
|
}
|
|
3464
|
+
// Code tools execute directly via Function constructor — no callback needed
|
|
3465
|
+
if (data.toolType === 'code') {
|
|
3466
|
+
try {
|
|
3467
|
+
const result = executeToolCodeSnippet(data, node, context);
|
|
3468
|
+
addTraceEntry(trace, {
|
|
3469
|
+
type: 'node_complete',
|
|
3470
|
+
nodeId: node.id,
|
|
3471
|
+
nodeName: node.data.label,
|
|
3472
|
+
nodeType: 'tool',
|
|
3473
|
+
message: `Code tool completed: ${toolNameDisplay}`,
|
|
3474
|
+
data: {
|
|
3475
|
+
toolName: data.toolName || '',
|
|
3476
|
+
toolNameDisplay,
|
|
3477
|
+
toolType: 'code',
|
|
3478
|
+
nodeLabel: node.data.label,
|
|
3479
|
+
result
|
|
3480
|
+
},
|
|
3481
|
+
}, options);
|
|
3482
|
+
// Apply output transform if defined
|
|
3483
|
+
if (data.outputTransform) {
|
|
3484
|
+
return evaluateExpression(data.outputTransform, {
|
|
3485
|
+
...context,
|
|
3486
|
+
nodeOutputs: { ...context.nodeOutputs, result },
|
|
3487
|
+
});
|
|
3488
|
+
}
|
|
3489
|
+
return result;
|
|
3490
|
+
}
|
|
3491
|
+
catch (error) {
|
|
3492
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
3493
|
+
throw new Error(`Code tool '${toolNameDisplay}' failed: ${errorMessage}\n` +
|
|
3494
|
+
`Node: "${node.data.label}" (${node.id})`);
|
|
3495
|
+
}
|
|
3496
|
+
}
|
|
3044
3497
|
// If no callback is provided, we can't execute the tool
|
|
3045
3498
|
if (!options.onToolCall) {
|
|
3046
3499
|
// For HTTP tools, we can execute directly
|