flowcraft 2.10.0 → 2.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/dist/adapter-DzeZVjSE.d.mts +133 -0
- package/dist/adapters/index.d.mts +2 -0
- package/dist/adapters/index.mjs +3 -0
- package/dist/adapters/persistent-event-bus.d.mts +2 -0
- package/dist/adapters/persistent-event-bus.mjs +59 -0
- package/dist/analysis-B5Twr7sD.d.mts +52 -0
- package/dist/analysis.d.mts +2 -0
- package/dist/analysis.mjs +164 -0
- package/dist/batch-gather-BhF-IzQR.d.mts +8 -0
- package/dist/batch-scatter-DD8TU0Wm.d.mts +8 -0
- package/dist/container-BKdd-9wf.d.mts +24 -0
- package/dist/container-factory-fDY2kkxt.d.mts +17 -0
- package/dist/container-factory.d.mts +2 -0
- package/dist/container-factory.mjs +23 -0
- package/dist/container.d.mts +2 -0
- package/dist/container.mjs +43 -0
- package/dist/context-ZVtzXuZu.d.mts +64 -0
- package/dist/context.d.mts +2 -0
- package/dist/context.mjs +145 -0
- package/dist/error-mapper-BAv_YQMQ.d.mts +14 -0
- package/dist/error-mapper.d.mts +2 -0
- package/dist/error-mapper.mjs +37 -0
- package/dist/errors-CyyIj3OO.d.mts +21 -0
- package/dist/errors.d.mts +2 -0
- package/dist/errors.mjs +24 -0
- package/dist/evaluator-Dnj5qJ92.d.mts +31 -0
- package/dist/evaluator.d.mts +2 -0
- package/dist/evaluator.mjs +80 -0
- package/dist/flow-CZGpYpl-.d.mts +94 -0
- package/dist/flow.d.mts +2 -0
- package/dist/flow.mjs +328 -0
- package/dist/index-9iG2qHLe.d.mts +1 -0
- package/dist/index-Bk0eNZmQ.d.mts +1 -0
- package/dist/index-CNgSR_kt.d.mts +1 -0
- package/dist/index-CW2WHUXP.d.mts +1 -0
- package/dist/index.d.mts +24 -1
- package/dist/index.mjs +31 -791
- package/dist/linter-B8KALEae.d.mts +25 -0
- package/dist/linter.d.mts +2 -0
- package/dist/linter.mjs +74 -0
- package/dist/logger-BvDgvNHQ.d.mts +19 -0
- package/dist/logger.d.mts +2 -0
- package/dist/logger.mjs +26 -0
- package/dist/node.d.mts +2 -0
- package/dist/node.mjs +55 -0
- package/dist/nodes/batch-gather.d.mts +2 -0
- package/dist/nodes/batch-gather.mjs +47 -0
- package/dist/nodes/batch-scatter.d.mts +2 -0
- package/dist/nodes/batch-scatter.mjs +52 -0
- package/dist/nodes/index.d.mts +7 -0
- package/dist/nodes/index.mjs +8 -0
- package/dist/nodes/sleep.d.mts +2 -0
- package/dist/nodes/sleep.mjs +41 -0
- package/dist/nodes/subflow.d.mts +2 -0
- package/dist/nodes/subflow.mjs +64 -0
- package/dist/nodes/wait.d.mts +2 -0
- package/dist/nodes/wait.mjs +12 -0
- package/dist/nodes/webhook.d.mts +2 -0
- package/dist/nodes/webhook.mjs +24 -0
- package/dist/orchestrator-DwMIJRFI.d.mts +8 -0
- package/dist/persistent-event-bus-COiQOpWh.d.mts +68 -0
- package/dist/replay-CVOy6d_L.d.mts +44 -0
- package/dist/runtime/adapter.d.mts +2 -0
- package/dist/runtime/adapter.mjs +349 -0
- package/dist/runtime/builtin-keys.d.mts +37 -0
- package/dist/runtime/builtin-keys.mjs +12 -0
- package/dist/runtime/execution-context.d.mts +2 -0
- package/dist/runtime/execution-context.mjs +26 -0
- package/dist/runtime/executors.d.mts +2 -0
- package/dist/runtime/executors.mjs +259 -0
- package/dist/runtime/index.d.mts +6 -0
- package/dist/runtime/index.mjs +10 -0
- package/dist/runtime/node-executor-factory.d.mts +11 -0
- package/dist/runtime/node-executor-factory.mjs +41 -0
- package/dist/runtime/orchestrator.d.mts +2 -0
- package/dist/runtime/orchestrator.mjs +41 -0
- package/dist/runtime/orchestrators/replay.d.mts +2 -0
- package/dist/{replay-BB11M6K1.mjs → runtime/orchestrators/replay.mjs} +1 -20
- package/dist/runtime/orchestrators/step-by-step.d.mts +15 -0
- package/dist/runtime/orchestrators/step-by-step.mjs +41 -0
- package/dist/runtime/orchestrators/utils.d.mts +2 -0
- package/dist/runtime/orchestrators/utils.mjs +79 -0
- package/dist/runtime/runtime.d.mts +2 -0
- package/dist/runtime/runtime.mjs +425 -0
- package/dist/runtime/scheduler.d.mts +2 -0
- package/dist/runtime/scheduler.mjs +64 -0
- package/dist/runtime/state.d.mts +2 -0
- package/dist/runtime/state.mjs +127 -0
- package/dist/runtime/traverser.d.mts +2 -0
- package/dist/runtime/traverser.mjs +213 -0
- package/dist/runtime/types.d.mts +2 -0
- package/dist/runtime/types.mjs +1 -0
- package/dist/runtime/workflow-logic-handler.d.mts +16 -0
- package/dist/runtime/workflow-logic-handler.mjs +159 -0
- package/dist/sanitizer-Bi00YjvO.d.mts +11 -0
- package/dist/sanitizer.d.mts +2 -0
- package/dist/sanitizer.mjs +37 -0
- package/dist/sdk.d.mts +1 -2
- package/dist/sdk.mjs +1 -2
- package/dist/serializer-BnmJr13R.d.mts +17 -0
- package/dist/serializer.d.mts +2 -0
- package/dist/serializer.mjs +34 -0
- package/dist/sleep-DpwYaY5b.d.mts +8 -0
- package/dist/subflow-n2IMsRe2.d.mts +8 -0
- package/dist/testing/event-logger.d.mts +62 -0
- package/dist/testing/event-logger.mjs +98 -0
- package/dist/testing/index.d.mts +5 -172
- package/dist/testing/index.mjs +6 -276
- package/dist/testing/run-with-trace.d.mts +37 -0
- package/dist/testing/run-with-trace.mjs +49 -0
- package/dist/testing/stepper.d.mts +78 -0
- package/dist/testing/stepper.mjs +100 -0
- package/dist/types-BcrXJEPI.d.mts +687 -0
- package/dist/types.d.mts +2 -0
- package/dist/types.mjs +1 -0
- package/dist/utils-BUEgr9V2.d.mts +34 -0
- package/dist/wait-2Q-LA7V7.d.mts +8 -0
- package/dist/webhook-BiCm-HLx.d.mts +12 -0
- package/package.json +4 -4
- package/dist/index-BXRN44Qf.d.mts +0 -1347
- package/dist/index.mjs.map +0 -1
- package/dist/replay-BB11M6K1.mjs.map +0 -1
- package/dist/runtime-ChsWirQN.mjs +0 -2256
- package/dist/runtime-ChsWirQN.mjs.map +0 -1
- package/dist/sdk.mjs.map +0 -1
- package/dist/testing/index.mjs.map +0 -1
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
import { analyzeBlueprint } from "../analysis.mjs";
|
|
2
|
+
import { ConsoleLogger } from "../logger.mjs";
|
|
3
|
+
import { TrackedAsyncContext } from "../context.mjs";
|
|
4
|
+
import { WorkflowState } from "./state.mjs";
|
|
5
|
+
import { JsonSerializer } from "../serializer.mjs";
|
|
6
|
+
import { FlowRuntime } from "./runtime.mjs";
|
|
7
|
+
|
|
8
|
+
//#region src/runtime/adapter.ts
|
|
9
|
+
/**
|
|
10
|
+
* The base class for all distributed adapters. It handles the technology-agnostic
|
|
11
|
+
* orchestration logic and leaves queue-specific implementation to subclasses.
|
|
12
|
+
*/
|
|
13
|
+
var BaseDistributedAdapter = class {
  // Embedded FlowRuntime used to execute individual nodes in this worker process.
  runtime;
  // Coordination store used for cross-worker locks, fan-in counters, and poison pills.
  store;
  // Serializes the final workflow context when publishing the terminal result.
  serializer;
  logger;
  // Optional event bus for job lifecycle events (job:enqueued / job:processed / job:failed).
  eventBus;
  constructor(options) {
    // The runtime receives a back-reference to this adapter via its dependencies,
    // so node implementations can reach the distributed layer.
    this.runtime = new FlowRuntime({
      ...options.runtimeOptions,
      dependencies: {
        ...options.runtimeOptions.dependencies,
        adapter: this
      }
    });
    this.store = options.coordinationStore;
    this.serializer = options.runtimeOptions.serializer || new JsonSerializer();
    this.logger = options.runtimeOptions.logger || new ConsoleLogger();
    this.eventBus = options.eventBus;
    this.logger.info("[Adapter] BaseDistributedAdapter initialized.");
  }
  /**
   * Starts the worker, which begins listening for and processing jobs from the queue.
   * `processJobs` is queue-specific and is supplied by the concrete subclass
   * (it is not defined in this class body, nor are `enqueueJob`,
   * `publishFinalResult`, or `createContext`).
   */
  start() {
    this.logger.info("[Adapter] Starting worker...");
    this.processJobs(this.handleJob.bind(this));
  }
  /**
   * Hook called by the execution factory to determine if a node's automatic
   * retries should be executed synchronously in-process (true) or delegated
   * to the Queue backoff behavior configured by the adapter (false).
   */
  shouldRetryInProcess(_nodeDef) {
    return true;
  }
  /**
   * Returns queue-level retry options for enqueuing successor jobs.
   * Only used when shouldRetryInProcess() returns false.
   * Base implementation returns undefined (no queue-level retry options).
   */
  getQueueRetryOptions(_nodeDef) {}
  /**
   * Hook called at the start of job processing. Subclasses can override this
   * to perform additional setup (e.g., timestamp tracking for reconciliation).
   */
  async onJobStart(_runId, _blueprintId, _nodeId) {}
  /**
   * The main handler for processing a single job from the queue.
   * Executes one node, persists its output, and hands off to
   * handleNodeCompletion() to enqueue successors.
   */
  async handleJob(job) {
    const { runId, blueprintId, nodeId } = job;
    const startTime = Date.now();
    await this.onJobStart(runId, blueprintId, nodeId);
    const blueprint = this.runtime.options.blueprints?.[blueprintId];
    if (!blueprint) {
      const reason = `Blueprint with ID '${blueprintId}' not found in the worker's runtime registry.`;
      this.logger.error(`[Adapter] FATAL: ${reason}`);
      await this.publishFinalResult(runId, {
        status: "failed",
        reason
      });
      return;
    }
    const context = this.createContext(runId);
    // Guard against state corruption when a worker holds a different blueprint
    // version than the one the run started with.
    // NOTE(review): this assumes 'blueprintVersion' is seeded into the run's
    // context before the first job is processed; otherwise a versioned
    // blueprint's very first job would read undefined here and be rejected —
    // confirm against the run entry point (not visible in this file).
    const storedVersion = await context.get("blueprintVersion");
    const currentVersion = blueprint.metadata?.version || null;
    if (storedVersion !== currentVersion) {
      const reason = `Blueprint version mismatch: stored version '${storedVersion}', current version '${currentVersion}'. Rejecting job to prevent state corruption.`;
      this.logger.warn(`[Adapter] Version mismatch for run ${runId}, node ${nodeId}: ${reason}`);
      return;
    }
    // First job of a run: record identifying metadata in the context and the
    // coordination store (used later by reconcile()).
    if (!await context.has("blueprintId")) {
      await context.set("blueprintId", blueprintId);
      await context.set("blueprintVersion", blueprint.metadata?.version || null);
      const blueprintKey = `flowcraft:blueprint:${runId}`;
      await this.store.setIfNotExist(blueprintKey, blueprintId, 3600);
    }
    const joinLockKey = `flowcraft:joinlock:${runId}:${nodeId}`;
    const fanInKey = `flowcraft:fanin:${runId}:${nodeId}`;
    const blueprintKey = `flowcraft:blueprint:${runId}`;
    // Heartbeat: every 18e5 ms (30 minutes) refresh the coordination keys to a
    // TTL of 3600 (presumably seconds, i.e. 1 hour — confirm the
    // CoordinationStore contract) so long-running nodes don't lose them.
    const heartbeatInterval = setInterval(async () => {
      await this.store.extendTTL(joinLockKey, 3600);
      await this.store.extendTTL(fanInKey, 3600);
      await this.store.extendTTL(blueprintKey, 3600);
      this.logger.debug(`[Adapter] Extended TTLs for run ${runId}, node ${nodeId}`);
    }, 18e5);
    try {
      const state = new WorkflowState(await context.toJSON(), context);
      // Absent an explicit false flag, treat this delivery as the last attempt.
      state.isLastAttempt = job.isLastAttempt !== false;
      let result;
      // Idempotency guard: with at-least-once delivery the same job can arrive
      // twice; if an output is already recorded, reuse it instead of re-running.
      if (await context.has(`_outputs.${nodeId}`)) {
        this.logger.info(`[Adapter] Node '${nodeId}' already completed, skipping re-execution.`);
        result = { output: await context.get(`_outputs.${nodeId}`) };
      } else {
        const nodeDef = blueprint.nodes.find((n) => n.id === nodeId);
        if (nodeDef && !this.shouldRetryInProcess(nodeDef)) {
          // Retries are delegated to the queue, so cap in-process retries at 1.
          // NOTE(review): this mutates the shared blueprint node definition in
          // place, which affects every subsequent job on this worker.
          if (nodeDef.config?.maxRetries && nodeDef.config.maxRetries > 1) nodeDef.config.maxRetries = 1;
        }
        result = await this.runtime.executeNode(blueprint, nodeId, state);
        await context.set(`_outputs.${nodeId}`, result.output);
        // Flush any buffered context deltas produced during execution.
        const stateContext = state.getContext();
        if (stateContext instanceof TrackedAsyncContext) {
          const deltas = stateContext.getDeltas();
          if (deltas.length > 0) {
            await stateContext.patch(deltas);
            stateContext.clearDeltas();
          }
        }
      }
      await this.handleNodeCompletion({
        runId,
        blueprintId,
        nodeId,
        blueprint,
        result,
        context,
        startTime,
        heartbeatInterval
      });
    } catch (error) {
      const reason = error.message || "Unknown execution error";
      this.logger.error(`[Adapter] FATAL: Job for node '${nodeId}' failed for Run ID '${runId}': ${reason}`);
      if (this.eventBus) await this.eventBus.emit({
        type: "job:failed",
        payload: {
          runId,
          blueprintId,
          nodeId,
          error
        }
      });
      // If the queue will redeliver this job, let it retry instead of failing the run.
      if (job.isLastAttempt === false) {
        this.logger.warn(`[Adapter] Job for node '${nodeId}' failed (queue attempt ${job.attempt || 1}), delegating retry to queue.`);
        return;
      }
      await this.publishFinalResult(runId, {
        status: "failed",
        reason
      });
      // Mark successors so joined branches fail fast instead of stalling.
      await this.writePoisonPillForSuccessors(runId, blueprint, nodeId);
    } finally {
      // Also cleared inside handleNodeCompletion on early exits; double-clearing
      // an interval is harmless.
      clearInterval(heartbeatInterval);
    }
  }
  /**
   * Handles post-execution logic: terminal node checks, successor determination,
   * and enqueueing of ready nodes. Extracted to support the idempotency guard.
   */
  async handleNodeCompletion(params) {
    const { runId, blueprintId, nodeId, blueprint, result, context, startTime, heartbeatInterval } = params;
    const analysis = analyzeBlueprint(blueprint);
    const isTerminalNode = analysis.terminalNodeIds.includes(nodeId);
    if (isTerminalNode) {
      // Collect completed node ids from the '_outputs.<id>' context keys
      // ("_outputs." is 9 characters, hence substring(9)).
      const allContextKeys = Object.keys(await context.toJSON());
      const completedNodes = /* @__PURE__ */ new Set();
      for (const key of allContextKeys) if (key.startsWith("_outputs.")) completedNodes.add(key.substring(9));
      // The run is complete only when every terminal node has produced output.
      if (analysis.terminalNodeIds.every((terminalId) => completedNodes.has(terminalId))) {
        this.logger.info(`[Adapter] All terminal nodes completed for Run ID: ${runId}. Declaring workflow complete.`);
        const finalContext = await context.toJSON();
        const finalResult = {
          context: finalContext,
          serializedContext: this.serializer.serialize(finalContext),
          status: "completed"
        };
        await this.publishFinalResult(runId, {
          status: "completed",
          payload: finalResult
        });
        clearInterval(heartbeatInterval);
        return;
      } else this.logger.info(`[Adapter] Terminal node '${nodeId}' completed for Run ID '${runId}', but other terminal nodes are still running.`);
    }
    const nextNodes = await this.runtime.determineNextNodes(blueprint, nodeId, result, context, runId);
    if (nextNodes.length === 0 && !isTerminalNode) {
      this.logger.info(`[Adapter] Non-terminal node '${nodeId}' reached end of branch for Run ID '${runId}'. This branch will now terminate.`);
      clearInterval(heartbeatInterval);
      return;
    }
    for (const { node: nextNodeDef, edge } of nextNodes) {
      await this.runtime.applyEdgeTransform(edge, result, nextNodeDef, context, void 0, runId);
      // Only enqueue a successor once all (or any, per its join strategy)
      // predecessors are done — see isReadyForFanIn().
      if (await this.isReadyForFanIn(runId, blueprint, nextNodeDef.id)) {
        this.logger.info(`[Adapter] Node '${nextNodeDef.id}' is ready. Enqueuing job.`);
        await this.enqueueJob({
          runId,
          blueprintId,
          nodeId: nextNodeDef.id
        });
        if (this.eventBus) await this.eventBus.emit({
          type: "job:enqueued",
          payload: {
            runId,
            blueprintId,
            nodeId: nextNodeDef.id
          }
        });
      } else this.logger.info(`[Adapter] Node '${nextNodeDef.id}' is waiting for other predecessors to complete.`);
    }
    const duration = Date.now() - startTime;
    if (this.eventBus) await this.eventBus.emit({
      type: "job:processed",
      payload: {
        runId,
        blueprintId,
        nodeId,
        duration,
        success: true
      }
    });
  }
  /**
   * Encapsulates the fan-in join logic using the coordination store.
   * Returns true when `targetNodeId` should be enqueued now; false when it must
   * wait for (or has lost the race against) other predecessors; throws when a
   * poison/cancel pill marks the join as failed.
   */
  async isReadyForFanIn(runId, blueprint, targetNodeId) {
    const targetNode = blueprint.nodes.find((n) => n.id === targetNodeId);
    if (!targetNode) throw new Error(`Node '${targetNodeId}' not found in blueprint`);
    const joinStrategy = targetNode.config?.joinStrategy || "all";
    const predecessors = blueprint.edges.filter((e) => e.target === targetNodeId);
    // A node with at most one incoming edge has no fan-in to coordinate.
    if (predecessors.length <= 1) return true;
    const poisonKey = `flowcraft:fanin:poison:${runId}:${targetNodeId}`;
    if (await this.store.get(poisonKey)) {
      this.logger.info(`[Adapter] Node '${targetNodeId}' is poisoned due to failed predecessor. Failing immediately.`);
      throw new Error(`Node '${targetNodeId}' failed due to poisoned predecessor in run '${runId}'`);
    }
    if (joinStrategy === "any") {
      // 'any' join: first predecessor to take the lock wins and runs the node.
      const lockKey = `flowcraft:joinlock:${runId}:${targetNodeId}`;
      if (!await this.store.setIfNotExist(lockKey, "locked", 3600)) {
        // Lock already taken. If a cancel pill is also already present
        // (written by writePoisonPillForSuccessors on a predecessor failure),
        // surface the failure; otherwise mark and quietly end this branch.
        const cancelKey = `flowcraft:fanin:cancel:${runId}:${targetNodeId}`;
        if (!await this.store.setIfNotExist(cancelKey, "cancelled", 3600)) {
          this.logger.info(`[Adapter] Node '${targetNodeId}' is cancelled due to failed predecessor. Failing immediately.`);
          throw new Error(`Node '${targetNodeId}' failed due to cancelled predecessor in run '${runId}'`);
        }
        return false;
      }
      return true;
    } else {
      // 'all' join: atomically count arrivals; the last predecessor to arrive
      // deletes the counter and proceeds.
      const fanInKey = `flowcraft:fanin:${runId}:${targetNodeId}`;
      if (await this.store.increment(fanInKey, 3600) >= predecessors.length) {
        await this.store.delete(fanInKey);
        return true;
      }
      return false;
    }
  }
  /**
   * Reconciles the state of a workflow run. It inspects the persisted
   * context to find completed nodes, determines the next set of executable
   * nodes (the frontier), and enqueues jobs for them if they aren't
   * already running. This is the core of the resume functionality.
   *
   * @param runId The unique ID of the workflow execution to reconcile.
   * @returns The set of node IDs that were enqueued for execution.
   */
  async reconcile(runId) {
    const context = this.createContext(runId);
    let blueprintId = await context.get("blueprintId");
    if (!blueprintId) {
      // Fall back to the coordination-store record written by handleJob.
      const blueprintKey = `flowcraft:blueprint:${runId}`;
      blueprintId = await this.store.get(blueprintKey);
      if (blueprintId) await context.set("blueprintId", blueprintId);
      else throw new Error(`Cannot reconcile runId '${runId}': blueprintId not found in context or coordination store.`);
    }
    const blueprint = this.runtime.options.blueprints?.[blueprintId];
    // Backfill the version marker for runs started before it was recorded.
    if (blueprint && !await context.has("blueprintVersion")) await context.set("blueprintVersion", blueprint.metadata?.version || null);
    if (!blueprint) throw new Error(`Cannot reconcile runId '${runId}': Blueprint with ID '${blueprintId}' not found.`);
    const state = await context.toJSON();
    const completedNodes = /* @__PURE__ */ new Set();
    for (const key of Object.keys(state)) if (key.startsWith("_outputs.")) completedNodes.add(key.substring(9));
    const frontier = this.calculateResumedFrontier(blueprint, completedNodes);
    const enqueuedNodes = /* @__PURE__ */ new Set();
    for (const nodeId of frontier) {
      const joinStrategy = blueprint.nodes.find((n) => n.id === nodeId)?.config?.joinStrategy || "all";
      const poisonKey = `flowcraft:fanin:poison:${runId}:${nodeId}`;
      if (await this.store.get(poisonKey)) {
        this.logger.info(`[Adapter] Reconciling: Node '${nodeId}' is poisoned, skipping.`, { runId });
        continue;
      }
      // Use store locks so a concurrent worker/reconciler doesn't enqueue the
      // same node twice. Note the short 120 TTL on the non-'any' node lock vs
      // 3600 on join locks (units presumably seconds — confirm store contract).
      let shouldEnqueue = false;
      if (joinStrategy === "any") {
        const lockKey = `flowcraft:joinlock:${runId}:${nodeId}`;
        if (await this.store.setIfNotExist(lockKey, "locked-by-reconcile", 3600)) shouldEnqueue = true;
        else this.logger.info(`[Adapter] Reconciling: Node '${nodeId}' is an 'any' join and is already locked.`, { runId });
      } else {
        const lockKey = `flowcraft:nodelock:${runId}:${nodeId}`;
        if (await this.store.setIfNotExist(lockKey, "locked", 120)) shouldEnqueue = true;
        else this.logger.info(`[Adapter] Reconciling: Node '${nodeId}' is already locked.`, { runId });
      }
      if (shouldEnqueue) {
        this.logger.info(`[Adapter] Reconciling: Enqueuing ready job for node '${nodeId}'`, { runId });
        await this.enqueueJob({
          runId,
          blueprintId: blueprint.id,
          nodeId
        });
        enqueuedNodes.add(nodeId);
      }
    }
    return enqueuedNodes;
  }
  /**
   * Computes the set of not-yet-completed nodes whose predecessor requirements
   * are satisfied (per their join strategy), i.e. the nodes a resumed run
   * should execute next. Pure function of the blueprint and completed set.
   */
  calculateResumedFrontier(blueprint, completedNodes) {
    const newFrontier = /* @__PURE__ */ new Set();
    // Build a target -> {sources} predecessor map from the edges.
    const allPredecessors = /* @__PURE__ */ new Map();
    for (const node of blueprint.nodes) allPredecessors.set(node.id, /* @__PURE__ */ new Set());
    for (const edge of blueprint.edges) allPredecessors.get(edge.target)?.add(edge.source);
    for (const node of blueprint.nodes) {
      if (completedNodes.has(node.id)) continue;
      const predecessors = allPredecessors.get(node.id) ?? /* @__PURE__ */ new Set();
      // Source nodes (no predecessors) are always eligible.
      // (The second completedNodes check is redundant with the `continue` above.)
      if (predecessors.size === 0 && !completedNodes.has(node.id)) {
        newFrontier.add(node.id);
        continue;
      }
      const joinStrategy = node.config?.joinStrategy || "all";
      const completedPredecessors = [...predecessors].filter((p) => completedNodes.has(p));
      // 'any': at least one predecessor done; 'all': every predecessor done.
      if (joinStrategy === "any" ? completedPredecessors.length > 0 : completedPredecessors.length === predecessors.size) newFrontier.add(node.id);
    }
    return newFrontier;
  }
  /**
   * Writes a poison pill for 'all' join successors and a cancellation pill for 'any' join successors of a failed node to prevent stalling or ambiguous states.
   */
  async writePoisonPillForSuccessors(runId, blueprint, failedNodeId) {
    // Resolve direct successors of the failed node to their node definitions.
    const successors = blueprint.edges.filter((edge) => edge.source === failedNodeId).map((edge) => edge.target).map((targetId) => blueprint.nodes.find((node) => node.id === targetId)).filter((node) => node);
    for (const successor of successors) if (successor) {
      const joinStrategy = successor.config?.joinStrategy || "all";
      if (joinStrategy === "all") {
        const poisonKey = `flowcraft:fanin:poison:${runId}:${successor.id}`;
        await this.store.setIfNotExist(poisonKey, "poisoned", 3600);
        this.logger.info(`[Adapter] Wrote poison pill for 'all' join node '${successor.id}' due to failed predecessor '${failedNodeId}'`);
      } else if (joinStrategy === "any") {
        const cancelKey = `flowcraft:fanin:cancel:${runId}:${successor.id}`;
        await this.store.setIfNotExist(cancelKey, "cancelled", 3600);
        this.logger.info(`[Adapter] Wrote cancellation pill for 'any' join node '${successor.id}' due to failed predecessor '${failedNodeId}'`);
      }
    }
  }
};
|
|
347
|
+
|
|
348
|
+
//#endregion
|
|
349
|
+
export { BaseDistributedAdapter };
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
//#region src/runtime/builtin-keys.d.ts
|
|
2
|
+
/**
|
|
3
|
+
* A registry defining the dynamic context keys set by each built-in node.
|
|
4
|
+
* This object is the single source of truth.
|
|
5
|
+
*/
|
|
6
|
+
declare const BUILTIN_KEYS: {
  readonly 'batch-scatter': readonly ["currentIndex", "hasMore"];
  readonly 'batch-gather': readonly ["allWorkerIds", "hasMore"];
};
/**
 * A comprehensive map of all possible dynamic keys to their corresponding TypeScript types.
 * The compiler will enforce that any key used in `BUILTIN_KEYS` must have an entry here.
 * (This is the declaration counterpart of src/runtime/builtin-keys.ts; the
 * literal values here must match the runtime object exported from that module.)
 */
type DynamicKeyTypeMap = {
  currentIndex: number;
  hasMore: boolean;
  allWorkerIds: string[];
};
/**
 * A utility type that creates a union of all possible dynamic key strings
 * by flattening the values of the `BUILTIN_KEYS` object.
 *
 * Example: 'currentIndex' | 'hasMore' | 'allWorkerIds'
 */
type AllDynamicKeyStrings = (typeof BUILTIN_KEYS)[keyof typeof BUILTIN_KEYS][number];
/**
 * The final, dynamically generated `DynamicKeys` type.
 *
 * It uses a mapped type to construct an object where:
 * - The keys are the union of all strings from `AllDynamicKeyStrings`.
 * - The value for each key is looked up from our central `DynamicKeyTypeMap`.
 *
 * This ensures that `DynamicKeys` is always perfectly in sync with `BUILTIN_KEYS`.
 */
type DynamicKeys = { [K in AllDynamicKeyStrings]: DynamicKeyTypeMap[K] };
|
|
36
|
+
//#endregion
|
|
37
|
+
export { BUILTIN_KEYS, DynamicKeys };
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
//#region src/runtime/builtin-keys.ts
|
|
2
|
+
/**
|
|
3
|
+
* A registry defining the dynamic context keys set by each built-in node.
|
|
4
|
+
* This object is the single source of truth.
|
|
5
|
+
*/
|
|
6
|
+
// Per-node key lists, named so each built-in node's contribution is explicit.
const BATCH_SCATTER_KEYS = ["currentIndex", "hasMore"];
const BATCH_GATHER_KEYS = ["allWorkerIds", "hasMore"];
const BUILTIN_KEYS = {
  "batch-scatter": BATCH_SCATTER_KEYS,
  "batch-gather": BATCH_GATHER_KEYS
};
|
|
10
|
+
|
|
11
|
+
//#endregion
|
|
12
|
+
export { BUILTIN_KEYS };
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import { WorkflowState } from "./state.mjs";
|
|
2
|
+
|
|
3
|
+
//#region src/runtime/execution-context.ts
|
|
4
|
+
/**
|
|
5
|
+
* A container for all state and dependencies of a single workflow execution.
|
|
6
|
+
* This object is created once per `run` and passed through the execution stack.
|
|
7
|
+
*/
|
|
8
|
+
var ExecutionContext = class ExecutionContext {
  /**
   * Bundles every piece of per-run state and every service dependency into a
   * single object that is threaded through the execution stack. Constructed
   * once per `run`.
   */
  constructor(blueprint, state, nodeRegistry, executionId, runtime, services, signal, concurrency) {
    Object.assign(this, {
      blueprint,
      state,
      nodeRegistry,
      executionId,
      runtime,
      services,
      signal,
      concurrency
    });
    // Wire workflow-state events to the run's event bus under this execution id.
    this.state.setEventEmitter(this.services.eventBus, this.executionId);
  }
  /**
   * Derives a context for a nested subflow: fresh blueprint and state, but the
   * same registry, id, runtime, services, abort signal, and concurrency limit.
   */
  createForSubflow(subBlueprint, initialSubState) {
    const { nodeRegistry, executionId, runtime, services, signal, concurrency } = this;
    return new ExecutionContext(subBlueprint, new WorkflowState(initialSubState), nodeRegistry, executionId, runtime, services, signal, concurrency);
  }
};
|
|
24
|
+
|
|
25
|
+
//#endregion
|
|
26
|
+
export { ExecutionContext };
|
|
@@ -0,0 +1,2 @@
|
|
|
1
|
+
import { B as NodeExecutionResult, H as NodeExecutorConfig, L as ClassNodeExecutor, R as ExecutionStrategy, V as NodeExecutor, z as FunctionNodeExecutor } from "../types-BcrXJEPI.mjs";
|
|
2
|
+
export { ClassNodeExecutor, ExecutionStrategy, FunctionNodeExecutor, NodeExecutionResult, NodeExecutor, NodeExecutorConfig };
|
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
import { FlowcraftError } from "../errors.mjs";
|
|
2
|
+
|
|
3
|
+
//#region src/runtime/executors.ts
|
|
4
|
+
/**
 * Runs `executor` up to `maxRetries` times, returning its first successful
 * result.
 *
 * Behavior:
 * - An AbortError from `signal` (or thrown by the executor) is converted to a
 *   non-fatal FlowcraftError("Workflow cancelled") and is never retried.
 * - A fatal FlowcraftError stops retrying immediately and is rethrown.
 * - Each intermediate failure is logged and, when an event bus is provided,
 *   emitted as a "node:retry" event.
 *
 * Fix: previously, when `maxRetries < 1` the loop never executed and the
 * trailing `throw lastError` threw `undefined`, losing all diagnostics. A
 * descriptive FlowcraftError is now thrown in that case instead; behavior for
 * maxRetries >= 1 is unchanged.
 *
 * @returns the executor's resolved value.
 * @throws the last executor error, a cancellation FlowcraftError, or a
 *         configuration FlowcraftError when maxRetries < 1.
 */
async function withRetries(executor, maxRetries, nodeDef, context, executionId, signal, eventBus) {
  let lastError;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      // Bail out before each attempt if the workflow has been cancelled.
      signal?.throwIfAborted();
      const result = await executor();
      if (attempt > 1) context.dependencies.logger.info(`Node execution succeeded after retry`, {
        nodeId: nodeDef.id,
        attempt,
        executionId
      });
      return result;
    } catch (error) {
      lastError = error;
      // Cancellation surfaces as a non-fatal FlowcraftError and is not retried.
      if (error instanceof DOMException && error.name === "AbortError") throw new FlowcraftError("Workflow cancelled", { isFatal: false });
      // Fatal errors skip the remaining attempts; rethrown after the loop.
      if (error instanceof FlowcraftError && error.isFatal) break;
      if (attempt < maxRetries) {
        context.dependencies.logger.warn(`Node execution failed, retrying`, {
          nodeId: nodeDef.id,
          attempt,
          maxRetries,
          error: error instanceof Error ? error.message : String(error),
          executionId
        });
        if (eventBus) await eventBus.emit({
          type: "node:retry",
          payload: {
            nodeId: nodeDef.id,
            attempt,
            executionId: executionId || "",
            blueprintId: context.dependencies.blueprint?.id || ""
          }
        });
      } else context.dependencies.logger.error(`Node execution failed after all retries`, {
        nodeId: nodeDef.id,
        attempts: maxRetries,
        error: error instanceof Error ? error.message : String(error),
        executionId
      });
    }
  }
  // Guard: if the loop never ran (maxRetries < 1), lastError is undefined.
  throw lastError ?? new FlowcraftError(`Node '${nodeDef.id}' was never executed (maxRetries=${maxRetries})`, { isFatal: true });
}
|
|
45
|
+
var FunctionNodeExecutor = class {
  /**
   * Adapts a plain async function into a node executor, delegating the retry
   * loop to the shared `withRetries` helper.
   */
  constructor(implementation, maxRetries, eventBus) {
    Object.assign(this, { implementation, maxRetries, eventBus });
  }
  /**
   * Invokes the wrapped function with the node context, retrying up to
   * `maxRetries` times via `withRetries`.
   */
  async execute(nodeDef, context, executionId, signal) {
    const attempt = () => this.implementation(context);
    return withRetries(attempt, this.maxRetries, nodeDef, context, executionId, signal, this.eventBus);
  }
};
|
|
55
|
+
var ClassNodeExecutor = class {
  /**
   * Adapts a node class (with prep/exec/post and optional fallback/recover
   * hooks) into a node executor. Only the exec phase is retried; prep, post,
   * fallback, and recover each run at most once per execute() call.
   */
  constructor(implementation, maxRetries, eventBus) {
    this.implementation = implementation;
    this.maxRetries = maxRetries;
    this.eventBus = eventBus;
  }
  /**
   * Lifecycle: instantiate -> prep -> exec (with retries) -> fallback (only if
   * exec failed non-fatally) -> post. `recover` always runs in the finally
   * block when any error was recorded, and its own failures are logged but
   * never rethrown.
   */
  async execute(nodeDef, context, executionId, signal) {
    const instance = new this.implementation(nodeDef.params || {}, nodeDef.id);
    let lastError;
    try {
      signal?.throwIfAborted();
      const prepResult = await instance.prep(context);
      let execResult;
      try {
        // Only the exec phase goes through the shared retry helper.
        execResult = await withRetries(() => instance.exec(prepResult, context), this.maxRetries, nodeDef, context, executionId, signal, this.eventBus);
      } catch (error) {
        // Record the failure but don't rethrow yet: non-fatal errors fall
        // through to the fallback path below. Cancellation and fatal errors
        // are rethrown immediately.
        lastError = error instanceof Error ? error : new Error(String(error));
        if (error instanceof DOMException && error.name === "AbortError") throw new FlowcraftError("Workflow cancelled", { isFatal: false });
        if (error instanceof FlowcraftError && error.isFatal) throw error;
      }
      if (lastError) {
        signal?.throwIfAborted();
        // Give the node a chance to produce a substitute result.
        execResult = await instance.fallback(lastError, context);
      }
      signal?.throwIfAborted();
      // NOTE(review): this is a falsy check, so an exec/fallback result of
      // 0, "", false, or null is treated as failure here — confirm intended.
      if (!execResult) throw new Error("Execution failed after all retries");
      return await instance.post(execResult, context);
    } catch (error) {
      lastError = error instanceof Error ? error : new Error(String(error));
      if (error instanceof DOMException && error.name === "AbortError") throw new FlowcraftError("Workflow cancelled", { isFatal: false });
      throw error;
    } finally {
      // Best-effort cleanup hook: runs whenever any error occurred (even if a
      // fallback succeeded); its failures are logged, not propagated.
      if (lastError) try {
        await instance.recover(lastError, context);
      } catch (recoverError) {
        context.dependencies.logger.warn(`Recover phase failed`, {
          nodeId: nodeDef.id,
          originalError: lastError.message,
          recoverError: recoverError instanceof Error ? recoverError.message : String(recoverError),
          executionId
        });
      }
    }
  }
};
|
|
100
|
+
var NodeExecutor = class {
	// Workflow runtime context for the current run (state, services, signal,
	// executionId, blueprint).
	context;
	// Definition of the node being executed (id, params, config).
	nodeDef;
	// Strategy that actually runs the node (e.g. class- or function-based).
	strategy;
	constructor(config) {
		this.context = config.context;
		this.nodeDef = config.nodeDef;
		this.strategy = config.strategy;
	}
	// Execute the node with middleware and lifecycle events.
	// Builds a per-node context, runs before/after hooks around the strategy,
	// wraps the whole thing in the registered aroundNode hooks (outermost
	// middleware first), emits node:start / node:finish / node:error /
	// node:fallback events, and normalizes failures into FlowcraftError.
	// Returns an object whose `status` is "success", "failed", or
	// "failed_with_fallback".
	async execute(input) {
		// Per-node view of the runtime handed to hooks and the strategy.
		const nodeContext = {
			context: this.context.state.getContext(),
			input,
			params: this.nodeDef.params || {},
			dependencies: {
				...this.context.services.dependencies,
				logger: this.context.services.logger,
				runtime: this.context,
				workflowState: this.context.state
			},
			signal: this.context.signal
		};
		// Collect only the hooks each middleware actually defines.
		const beforeHooks = this.context.services.middleware.map((m) => m.beforeNode).filter((hook) => !!hook);
		const afterHooks = this.context.services.middleware.map((m) => m.afterNode).filter((hook) => !!hook);
		const aroundHooks = this.context.services.middleware.map((m) => m.aroundNode).filter((hook) => !!hook);
		// Innermost layer: beforeNode hooks, strategy execution, error
		// normalization, and the optional config-level fallback routing.
		// afterNode hooks always run (finally), seeing result/error.
		const coreExecution = async () => {
			let result;
			let error;
			try {
				for (const hook of beforeHooks) await hook(nodeContext.context, this.nodeDef.id);
				result = await this.strategy.execute(this.nodeDef, nodeContext, this.context.executionId, this.context.signal);
				return {
					status: "success",
					result
				};
			} catch (e) {
				error = e instanceof Error ? e : new Error(String(e));
				// Wrap non-Flowcraft errors so downstream consumers get a
				// consistent error type carrying node/blueprint/execution ids.
				const flowcraftError = error instanceof FlowcraftError ? error : new FlowcraftError(`Node '${this.nodeDef.id}' execution failed`, {
					cause: error,
					nodeId: this.nodeDef.id,
					blueprintId: this.context.blueprint.id,
					executionId: this.context.executionId,
					isFatal: false
				});
				const fallbackNodeId = this.nodeDef.config?.fallback;
				// When retry state is absent, treat this as the last attempt.
				const isLastAttempt = this.context.state.isLastAttempt ?? true;
				// Route to a declared fallback node only for non-fatal errors
				// on the final attempt.
				if (fallbackNodeId && !flowcraftError.isFatal && isLastAttempt) {
					this.context.services.logger.warn(`Node failed, fallback required`, {
						nodeId: this.nodeDef.id,
						fallbackNodeId,
						error: error.message,
						executionId: this.context.executionId
					});
					await this.context.services.eventBus.emit({
						type: "node:fallback",
						payload: {
							nodeId: this.nodeDef.id,
							executionId: this.context.executionId || "",
							fallback: fallbackNodeId,
							blueprintId: this.context.blueprint.id
						}
					});
					return {
						status: "failed_with_fallback",
						fallbackNodeId,
						error: flowcraftError
					};
				}
				return {
					status: "failed",
					error: flowcraftError
				};
			} finally {
				// afterNode hooks observe the outcome regardless of status.
				for (const hook of afterHooks) await hook(nodeContext.context, this.nodeDef.id, result, error);
			}
		};
		// Wrap coreExecution in aroundNode hooks, iterating backwards so the
		// first-registered middleware ends up outermost.
		let executionChain = coreExecution;
		for (let i = aroundHooks.length - 1; i >= 0; i--) {
			const hook = aroundHooks[i];
			const next = executionChain;
			executionChain = async () => {
				// Sentinel: stays undefined unless the middleware called next().
				let capturedResult;
				const middlewareResult = await hook(nodeContext.context, this.nodeDef.id, async () => {
					capturedResult = await next();
					// Inside the middleware, surface failures as throws so the
					// middleware can catch/handle them.
					if (capturedResult.status === "success") return capturedResult.result;
					throw capturedResult.error;
				});
				// Middleware short-circuited (never called next) but returned
				// a value: treat that value as the node's successful result.
				if (!capturedResult && middlewareResult) return {
					status: "success",
					result: middlewareResult
				};
				if (!capturedResult) throw new Error("Middleware did not call next() and did not return a result");
				return capturedResult;
			};
		}
		try {
			await this.context.services.eventBus.emit({
				type: "node:start",
				payload: {
					nodeId: this.nodeDef.id,
					executionId: this.context.executionId || "",
					input: nodeContext.input,
					blueprintId: this.context.blueprint.id
				}
			});
			const executionResult = await executionChain();
			// Emit finish/error based on the chain's (non-throwing) outcome.
			if (executionResult.status === "success") await this.context.services.eventBus.emit({
				type: "node:finish",
				payload: {
					nodeId: this.nodeDef.id,
					result: executionResult.result,
					executionId: this.context.executionId || "",
					blueprintId: this.context.blueprint.id
				}
			});
			else await this.context.services.eventBus.emit({
				type: "node:error",
				payload: {
					nodeId: this.nodeDef.id,
					error: executionResult.error,
					executionId: this.context.executionId || "",
					blueprintId: this.context.blueprint.id
				}
			});
			return executionResult;
		} catch (error) {
			// A throw out of the chain (middleware threw, or a fatal error
			// escaped): normalize, emit node:error, then rethrow.
			const err = error instanceof Error ? error : new Error(String(error));
			const flowcraftError = err instanceof FlowcraftError ? err : new FlowcraftError(`Node '${this.nodeDef.id}' failed execution.`, {
				cause: err,
				nodeId: this.nodeDef.id,
				blueprintId: this.context.blueprint.id,
				executionId: this.context.executionId,
				isFatal: false
			});
			await this.context.services.eventBus.emit({
				type: "node:error",
				payload: {
					nodeId: this.nodeDef.id,
					error: flowcraftError,
					executionId: this.context.executionId || "",
					blueprintId: this.context.blueprint.id
				}
			});
			// Cancellation gets a dedicated, non-fatal error type.
			if (error instanceof DOMException && error.name === "AbortError") throw new FlowcraftError("Workflow cancelled", {
				executionId: this.context.executionId,
				isFatal: false
			});
			// NOTE(review): a FlowcraftError with isFatal === true is
			// re-wrapped here as isFatal: false — presumably intentional
			// demotion at this boundary, but worth confirming upstream.
			throw error instanceof FlowcraftError && !error.isFatal ? error : new FlowcraftError(`Node '${this.nodeDef.id}' failed execution.`, {
				cause: error,
				nodeId: this.nodeDef.id,
				blueprintId: this.context.blueprint.id,
				executionId: this.context.executionId,
				isFatal: false
			});
		}
	}
};
|
|
257
|
+
|
|
258
|
+
//#endregion
|
|
259
|
+
export { ClassNodeExecutor, FunctionNodeExecutor, NodeExecutor };
|