flowfram-runtime 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +151 -0
- package/dist/ServerFlowRuntime.d.ts +303 -0
- package/dist/ServerFlowRuntime.d.ts.map +1 -0
- package/dist/ServerFlowRuntime.js +5269 -0
- package/dist/ServerFlowRuntime.js.map +1 -0
- package/dist/cli.d.ts +17 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +184 -0
- package/dist/cli.js.map +1 -0
- package/dist/logger.d.ts +17 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +53 -0
- package/dist/logger.js.map +1 -0
- package/dist/persistence.d.ts +39 -0
- package/dist/persistence.d.ts.map +1 -0
- package/dist/persistence.js +138 -0
- package/dist/persistence.js.map +1 -0
- package/dist/server.d.ts +23 -0
- package/dist/server.d.ts.map +1 -0
- package/dist/server.js +1050 -0
- package/dist/server.js.map +1 -0
- package/package.json +73 -0
package/dist/server.js
ADDED
|
@@ -0,0 +1,1050 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* FlowRed Runtime Service
|
|
4
|
+
*
|
|
5
|
+
* Distributed runtime execution engine with REST API + SSE events.
|
|
6
|
+
*
|
|
7
|
+
* Endpoints:
|
|
8
|
+
* - POST /deploy - Deploy a flow
|
|
9
|
+
* - POST /stop - Stop a flow
|
|
10
|
+
* - POST /inject - Execute an inject node
|
|
11
|
+
* - GET /status - Get runtime status
|
|
12
|
+
* - GET /events - SSE endpoint for real-time events
|
|
13
|
+
* - GET /health - Health check
|
|
14
|
+
*/
|
|
15
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
16
|
+
if (k2 === undefined) k2 = k;
|
|
17
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
18
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
19
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
20
|
+
}
|
|
21
|
+
Object.defineProperty(o, k2, desc);
|
|
22
|
+
}) : (function(o, m, k, k2) {
|
|
23
|
+
if (k2 === undefined) k2 = k;
|
|
24
|
+
o[k2] = m[k];
|
|
25
|
+
}));
|
|
26
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
27
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
28
|
+
}) : function(o, v) {
|
|
29
|
+
o["default"] = v;
|
|
30
|
+
});
|
|
31
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
32
|
+
var ownKeys = function(o) {
|
|
33
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
34
|
+
var ar = [];
|
|
35
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
36
|
+
return ar;
|
|
37
|
+
};
|
|
38
|
+
return ownKeys(o);
|
|
39
|
+
};
|
|
40
|
+
return function (mod) {
|
|
41
|
+
if (mod && mod.__esModule) return mod;
|
|
42
|
+
var result = {};
|
|
43
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
44
|
+
__setModuleDefault(result, mod);
|
|
45
|
+
return result;
|
|
46
|
+
};
|
|
47
|
+
})();
|
|
48
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
49
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
50
|
+
};
|
|
51
|
+
Object.defineProperty(exports, "__esModule", { value: true });
// NOTE: `createServer` is defined later in this file (outside this excerpt);
// the assignment works because function declarations are hoisted.
exports.createServer = createServer;
// Load environment variables from .env files
// In production: only .env
// In development: .env + .env.local (local overrides)
// IMPORTANT: dotenv must run before any module below reads process.env.
const dotenv = __importStar(require("dotenv"));
const path = __importStar(require("path"));
const fs = __importStar(require("fs"));
// Load .env first (base config) - suppress dotenv logs
dotenv.config({ path: path.resolve(process.cwd(), ".env") });
// Only load .env.local in development mode (not production)
const isProduction = process.env.NODE_ENV === "production";
if (!isProduction) {
    const envLocalPath = path.resolve(process.cwd(), ".env.local");
    if (fs.existsSync(envLocalPath)) {
        // override: true lets .env.local win over values already set by .env
        dotenv.config({ path: envLocalPath, override: true });
    }
}
const express_1 = __importDefault(require("express"));
const cors_1 = __importDefault(require("cors"));
const ServerFlowRuntime_1 = require("./ServerFlowRuntime");
const persistence_1 = require("./persistence");
const logger_1 = __importDefault(require("./logger"));
// Express app with permissive CORS and a 10mb JSON body limit
// (deployed flow graphs can be large).
const app = (0, express_1.default)();
app.use((0, cors_1.default)());
app.use(express_1.default.json({ limit: "10mb" }));
// NOTE(review): `||` means PORT=0 would fall back to 3001 — presumably
// intentional, but `??` would be stricter; confirm before changing.
const PORT = process.env.PORT || 3001;
// Map of runtimes per flowId
const runtimes = new Map();
// flowId -> { flowData, deployedAt, tenantId, modelName, scenarioName, userId, modelId }
const deployedFlows = new Map();
// SSE connections per flowId: flowId -> Set<express Response>
const sseConnections = new Map();
|
|
83
|
+
// ============================================================================
|
|
84
|
+
// HELPER FUNCTIONS
|
|
85
|
+
// ============================================================================
|
|
86
|
+
/**
 * Format duration in milliseconds to human-readable string.
 *
 * Granularity coarsens as the duration grows:
 *   - >= 1 day:    "Xd Yh Zm"  (seconds omitted)
 *   - >= 1 hour:   "Xh Ym Zs"
 *   - >= 1 minute: "Xm Ys"
 *   - otherwise:   "Xs"
 *
 * @param {number} ms - Duration in milliseconds. Non-finite or negative
 *   values are clamped to 0 (previously they produced nonsense output
 *   such as "NaNs" or "-1s").
 * @returns {string} Human-readable duration string.
 */
function formatDuration(ms) {
    // Robustness fix: callers compute `Date.now() - deployedAt`, which can be
    // negative if clocks skew, and undefined inputs yield NaN — clamp to 0.
    const safeMs = Number.isFinite(ms) && ms > 0 ? ms : 0;
    const seconds = Math.floor(safeMs / 1000);
    const minutes = Math.floor(seconds / 60);
    const hours = Math.floor(minutes / 60);
    const days = Math.floor(hours / 24);
    if (days > 0) {
        return `${days}d ${hours % 24}h ${minutes % 60}m`;
    }
    else if (hours > 0) {
        return `${hours}h ${minutes % 60}m ${seconds % 60}s`;
    }
    else if (minutes > 0) {
        return `${minutes}m ${seconds % 60}s`;
    }
    else {
        return `${seconds}s`;
    }
}
|
|
107
|
+
// ============================================================================
|
|
108
|
+
// REST ENDPOINTS
|
|
109
|
+
// ============================================================================
|
|
110
|
+
/**
 * POST /deploy
 * Deploy a flow to the runtime.
 *
 * Body: { flowId (required), flowData, tenantId, modelName, scenarioName,
 *         userId, modelId, executionContext, maxHops }
 * Responds 400 without flowId, 500 on deploy failure, otherwise the
 * deploy result summary (status/nodesDeployed/flowsDeployed/errors).
 */
app.post("/deploy", async (req, res) => {
    const { flowId, flowData, tenantId, modelName, scenarioName, userId, modelId, executionContext, maxHops, } = req.body;
    if (!flowId) {
        return res
            .status(400)
            .json({ success: false, error: "flowId is required" });
    }
    // Use tenantId from request or default to flowId
    const effectiveTenantId = tenantId || flowId;
    // Default execution context is 'floweditor' for backwards compatibility
    const effectiveContext = executionContext || "floweditor";
    logger_1.default.info(`[Runtime] POST /deploy flowId=${flowId} tenant=${effectiveTenantId} context=${effectiveContext} model=${modelName || "N/A"} nodes=${flowData?.nodes?.length || 0}`);
    // Debug: log node types to check if chart-output/table-output are included
    const nodeTypes = (flowData?.nodes || []).map((n) => n.type);
    const vizNodes = (flowData?.nodes || []).filter((n) => n.type === "chart-output" || n.type === "table-output");
    logger_1.default.debug(`[Runtime] Node types: ${JSON.stringify(nodeTypes)}`);
    logger_1.default.debug(`[Runtime] Visualization nodes: ${vizNodes.length} (chart-output: ${vizNodes.filter((n) => n.type === "chart-output").length}, table-output: ${vizNodes.filter((n) => n.type === "table-output").length})`);
    try {
        // Reuse an existing runtime for this flow when present (redeploy),
        // otherwise create one and wire its events to the SSE forwarder.
        // NOTE(review): setupEventForwarding is defined elsewhere in this file.
        let runtime = runtimes.get(flowId);
        if (!runtime) {
            runtime = new ServerFlowRuntime_1.ServerFlowRuntime(flowId, effectiveTenantId);
            runtimes.set(flowId, runtime);
            setupEventForwarding(runtime, flowId);
        }
        else {
            // Clear flow context on redeploy to ensure fresh state for self-referential variables
            const clearedKeys = runtime.clearFlowContext();
            logger_1.default.debug(`[Runtime] Cleared ${clearedKeys.length} flow context keys on redeploy`);
        }
        // Set execution context for SSE event tagging
        runtime.setExecutionContext(effectiveContext);
        // Set max hops for cycle protection (user-configurable per model)
        // NOTE(review): the truthiness guard also rejects maxHops=0 — presumably
        // intentional (0 would disable execution); confirm against runtime docs.
        if (maxHops && typeof maxHops === 'number') {
            runtime.setMaxHops(maxHops);
        }
        // Convert flowData to the format expected by ServerFlowRuntime
        // NOTE(review): buildFlowJSON is defined elsewhere in this file.
        const flowJSON = buildFlowJSON(flowId, flowData);
        const result = await runtime.deploy([flowJSON]);
        const deployedAt = Date.now();
        // Persist the deployed flow with tenant info and metadata so it can be
        // restored after a service restart.
        deployedFlows.set(flowId, {
            flowData,
            deployedAt,
            tenantId: effectiveTenantId,
            modelName,
            scenarioName,
            userId,
            modelId,
        });
        (0, persistence_1.savePersistedState)(deployedFlows);
        res.json({
            // "partial" still counts as success: some nodes deployed.
            success: result.status === "success" || result.status === "partial",
            status: result.status,
            nodesDeployed: result.nodesDeployed,
            flowsDeployed: result.flowsDeployed,
            errors: result.errors,
            deployedAt,
        });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] Deploy error:`, error);
        res.status(500).json({
            success: false,
            error: error instanceof Error ? error.message : "Unknown error",
        });
    }
});
|
|
181
|
+
/**
 * POST /stop
 * Stop a running flow.
 *
 * Body: { flowId (required) }
 * On success the flow is removed from the persisted state and the runtime
 * map; 404 if no runtime exists for the given flowId. Note that open SSE
 * connections for the flow are left untouched here (unlike the admin
 * /flows/:flowId/stop endpoint).
 */
app.post("/stop", async (req, res) => {
    const { flowId } = req.body;
    if (!flowId) {
        return res
            .status(400)
            .json({ success: false, error: "flowId is required" });
    }
    logger_1.default.info(`[Runtime] POST /stop flowId=${flowId}`);
    try {
        const targetRuntime = runtimes.get(flowId);
        if (!targetRuntime) {
            return res.status(404).json({ success: false, error: "Flow not found" });
        }
        const stopResult = await targetRuntime.stop(flowId);
        // Forget the flow everywhere: in-memory metadata, on-disk persistence,
        // and the runtime registry itself.
        deployedFlows.delete(flowId);
        (0, persistence_1.removePersistedFlow)(flowId);
        runtimes.delete(flowId);
        const responseBody = {
            success: stopResult.success,
            error: stopResult.error,
            timestamp: Date.now(),
        };
        res.json(responseBody);
    }
    catch (error) {
        logger_1.default.error(`[Runtime] Stop error:`, error);
        const message = error instanceof Error ? error.message : "Unknown error";
        res.status(500).json({
            success: false,
            error: message,
            timestamp: Date.now(),
        });
    }
});
|
|
219
|
+
/**
 * POST /config
 * Configure runtime flags for an already-deployed flow.
 *
 * Body: { flowId (required), suppressFunctionLogs?, fastMode? }
 * Only strictly-boolean values are applied; anything else is ignored.
 * Responds with the subset of flags that was actually applied.
 */
app.post("/config", (req, res) => {
    const { flowId, suppressFunctionLogs, fastMode } = req.body;
    if (!flowId) {
        return res
            .status(400)
            .json({ success: false, error: "flowId is required" });
    }
    const runtime = runtimes.get(flowId);
    if (!runtime) {
        return res
            .status(404)
            .json({ success: false, error: "Flow not found. Deploy first." });
    }
    // Track which flags were recognized so the caller can confirm them.
    const applied = {};
    const isBool = (value) => typeof value === "boolean";
    if (isBool(suppressFunctionLogs)) {
        runtime.setSuppressFunctionLogs(suppressFunctionLogs);
        applied.suppressFunctionLogs = suppressFunctionLogs;
    }
    if (isBool(fastMode)) {
        runtime.setFastMode(fastMode);
        applied.fastMode = fastMode;
    }
    res.json({ success: true, applied });
});
|
|
248
|
+
/**
 * POST /batch-run
 * Execute multiple iterations server-side to eliminate per-iteration network latency.
 * Emits progress events via SSE as iterations complete.
 *
 * Body: { flowId, injectNodeIds, iterations (all required),
 *         delayBetweenIterations?, executionContext?, fastMode?,
 *         suppressFunctionLogs? }
 * SSE events emitted: batch-start, batch-progress, batch-error, batch-complete.
 */
app.post("/batch-run", async (req, res) => {
    const { flowId, injectNodeIds, iterations, delayBetweenIterations = 0, executionContext = "instantiator", fastMode = false, // PERFORMANCE: Skip debug SSE events when true
    suppressFunctionLogs, // Optional: override function log suppression (defaults to LOG_LEVEL-based)
     } = req.body;
    if (!flowId || !injectNodeIds || !iterations) {
        return res.status(400).json({
            success: false,
            error: "flowId, injectNodeIds, and iterations are required",
        });
    }
    logger_1.default.info(`[Runtime] POST /batch-run flowId=${flowId} iterations=${iterations} nodes=${injectNodeIds.length} delay=${delayBetweenIterations}ms fastMode=${fastMode}`);
    try {
        const runtime = runtimes.get(flowId);
        if (!runtime) {
            return res
                .status(404)
                .json({ success: false, error: "Flow not found. Deploy first." });
        }
        // Set execution context for SSE event tagging
        runtime.setExecutionContext(executionContext);
        // PERFORMANCE: Enable fast mode to suppress debug SSE events
        runtime.setFastMode(fastMode);
        // Optional: override function log suppression for this run
        if (typeof suppressFunctionLogs === "boolean") {
            runtime.setSuppressFunctionLogs(suppressFunctionLogs);
        }
        const startTime = Date.now();
        let completedIterations = 0;
        const currentMaxHops = runtime.getMaxHops();
        // Emit batch-start event immediately so the client shows progress UI right away
        // This is critical for fast mode UX: without it the user sees no feedback for up to a minute
        const startConnections = sseConnections.get(flowId);
        if (startConnections && startConnections.size > 0) {
            const startEvent = {
                type: "batch-start",
                flowId,
                totalIterations: iterations,
                maxHops: currentMaxHops,
                fastMode,
                executionContext,
                timestamp: Date.now(),
            };
            const startMessage = `event: batch-start\ndata: ${JSON.stringify(startEvent)}\n\n`;
            startConnections.forEach((conn) => {
                try {
                    conn.write(startMessage);
                }
                catch (e) {
                    // Connection closed
                }
            });
        }
        // Execute all iterations server-side
        let flowError = null;
        for (let i = 1; i <= iterations; i++) {
            // CRITICAL: Check if flow has encountered an error - stop immediately
            if (runtime.hasError()) {
                flowError = runtime.getError() || "Flow execution stopped due to error";
                logger_1.default.error(`[Runtime] Batch-run stopping at iteration ${i}: ${flowError}`);
                // Emit batch-error event via SSE
                const connections = sseConnections.get(flowId);
                if (connections && connections.size > 0) {
                    const errorEvent = {
                        type: "batch-error",
                        flowId,
                        iteration: i,
                        totalIterations: iterations,
                        error: flowError,
                        executionContext,
                        timestamp: Date.now(),
                    };
                    const message = `event: batch-error\ndata: ${JSON.stringify(errorEvent)}\n\n`;
                    connections.forEach((conn) => {
                        try {
                            conn.write(message);
                        }
                        catch (e) {
                            // Connection closed
                        }
                    });
                }
                break;
            }
            // Set current iteration for SSE message tagging
            runtime.setCurrentIteration(i);
            // Execute all inject nodes for this iteration
            for (const nodeId of injectNodeIds) {
                await runtime.executeNode(nodeId, {});
                // Check for error after each inject
                if (runtime.hasError()) {
                    break;
                }
            }
            // CRITICAL: Wait for all async operations (HTTP, MQTT, CSV) to complete
            // This ensures the entire flow chain finishes before moving to next iteration
            const completed = await runtime.waitForPendingMessages(30000, 50);
            if (!completed) {
                logger_1.default.warn(`[Runtime] Iteration ${i} timed out waiting for pending operations`);
            }
            // CRITICAL: Check for error AFTER async operations complete
            // Errors occur during async execution, so we must check here
            if (runtime.hasError()) {
                flowError = runtime.getError() || "Flow execution stopped due to error";
                logger_1.default.error(`[Runtime] Batch-run stopping after iteration ${i} due to error: ${flowError}`);
                // Emit batch-error event via SSE
                const errorConnections = sseConnections.get(flowId);
                if (errorConnections && errorConnections.size > 0) {
                    const errorEvent = {
                        type: "batch-error",
                        flowId,
                        iteration: i,
                        totalIterations: iterations,
                        error: flowError,
                        executionContext,
                        timestamp: Date.now(),
                    };
                    const errorMessage = `event: batch-error\ndata: ${JSON.stringify(errorEvent)}\n\n`;
                    errorConnections.forEach((conn) => {
                        try {
                            conn.write(errorMessage);
                        }
                        catch (e) {
                            // Connection closed
                        }
                    });
                }
                break;
            }
            // Trigger observer nodes (coupling-matrix, resonance-indicator) at end of iteration
            // This auto-triggers nodes without input wires to compute visualization data
            // Safe: only affects nodes without input connections, no impact on normal flow
            // PERFORMANCE: In fast mode, only compute entropy on the LAST iteration
            // to avoid O(N²) re-scanning of the ever-growing _messageFlowHistory array.
            // The full history is still accumulated and processed once at the end.
            if (!fastMode || i === iterations) {
                await runtime.triggerObserverNodes();
            }
            completedIterations = i;
            // Emit batch progress event via SSE
            const connections = sseConnections.get(flowId);
            if (connections && connections.size > 0) {
                const progressEvent = {
                    type: "batch-progress",
                    flowId,
                    iteration: i,
                    totalIterations: iterations,
                    maxHops: currentMaxHops,
                    executionContext,
                    timestamp: Date.now(),
                };
                const message = `event: batch-progress\ndata: ${JSON.stringify(progressEvent)}\n\n`;
                connections.forEach((conn) => {
                    try {
                        conn.write(message);
                    }
                    catch (e) {
                        // Connection closed
                    }
                });
            }
            // Delay between iterations if specified
            if (delayBetweenIterations > 0 && i < iterations) {
                await new Promise((resolve) => setTimeout(resolve, delayBetweenIterations));
            }
        }
        const totalTime = Date.now() - startTime;
        // NOTE(review): divides by the REQUESTED iteration count; if the loop
        // broke early on error, this understates the true per-iteration average
        // (completedIterations would be the accurate divisor).
        const avgTimePerIteration = totalTime / iterations;
        // FAST MODE: Get accumulated data that was collected during fast mode execution
        const fastModeMetadata = runtime.getAndClearFastModeMetadata();
        const fastModeMessageFlow = runtime.getAndClearFastModeMessageFlow();
        const fastModeOutputData = runtime.getAndClearFastModeOutputData();
        // SERVER-SIDE RUNNING STATS: Get pre-computed statistics (no truncation)
        const runningStats = runtime.getAndClearRunningStats();
        const runningStatsVarCount = Object.values(runningStats).reduce((sum, funcVars) => sum + Object.keys(funcVars).length, 0);
        logger_1.default.info(`[Runtime] Fast mode data collected: ${fastModeMetadata.length} metadata snapshots, ${fastModeMessageFlow.length} message-flow records, ${fastModeOutputData.length} output-data records, ${runningStatsVarCount} running-stats variables`);
        // Emit batch complete event via SSE with accumulated fast mode data
        // NOTE(review): this fires even when the loop stopped with flowError —
        // clients receive batch-error followed by batch-complete; confirm this
        // ordering is what consumers expect.
        const connections = sseConnections.get(flowId);
        if (connections && connections.size > 0) {
            const completeEvent = {
                type: "batch-complete",
                flowId,
                iterations: completedIterations,
                totalTime,
                avgTimePerIteration,
                executionContext,
                timestamp: Date.now(),
                // Include accumulated fast mode data for client-side processing
                batchData: {
                    metadataSnapshots: fastModeMetadata,
                    messageFlowData: fastModeMessageFlow,
                    outputData: fastModeOutputData,
                },
                // SERVER-SIDE RUNNING STATS: Pre-computed statistics per function per variable
                // These are computed using Welford's online algorithm with ZERO truncation
                // Structure: { "F1 - Detection": { "PERF_F01": { count, mean, std, min, max, p5..p95, histogram } } }
                serverStats: runningStats,
            };
            const message = `event: batch-complete\ndata: ${JSON.stringify(completeEvent)}\n\n`;
            connections.forEach((conn) => {
                try {
                    conn.write(message);
                }
                catch (e) {
                    // Connection closed
                }
            });
        }
        logger_1.default.info(`[Runtime] Batch complete: ${completedIterations} iterations in ${totalTime}ms (avg ${avgTimePerIteration.toFixed(2)}ms/iter)`);
        // CRITICAL: Return error response if flow encountered an error
        if (flowError) {
            logger_1.default.error(`[Runtime] Batch-run stopped due to error: ${flowError}`);
            return res.status(500).json({
                success: false,
                error: flowError,
                completedIterations,
                totalTime,
                stoppedDueToError: true,
            });
        }
        res.json({
            success: true,
            completedIterations,
            totalTime,
            avgTimePerIteration,
            // Include serverStats in HTTP response for reliable delivery
            // SSE batch-complete may not arrive in time for large payloads
            serverStats: runningStats,
        });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] Batch-run error:`, error);
        res.status(500).json({
            success: false,
            error: error instanceof Error ? error.message : "Unknown error",
        });
    }
});
|
|
490
|
+
/**
 * POST /inject
 * Execute an inject node.
 *
 * Body: { flowId, nodeId (both required), payload?, simulationContext?,
 *         trackCompletion? }
 * An optional simulationContext.iteration tags subsequent SSE messages.
 * When trackCompletion is true, a flow cycle is started and its cycleId
 * is returned (otherwise cycleId is absent from the response).
 */
app.post("/inject", async (req, res) => {
    const { flowId, nodeId, payload, simulationContext, trackCompletion } = req.body;
    if (!flowId || !nodeId) {
        return res
            .status(400)
            .json({ success: false, error: "flowId and nodeId are required" });
    }
    const iteration = simulationContext?.iteration;
    logger_1.default.info(`[Runtime] POST /inject flowId=${flowId} nodeId=${nodeId} iteration=${iteration ?? "N/A"} trackCompletion=${trackCompletion ?? false}`);
    try {
        const targetRuntime = runtimes.get(flowId);
        if (!targetRuntime) {
            return res.status(404).json({ success: false, error: "Flow not found" });
        }
        // Tag subsequent SSE messages with the caller-supplied iteration.
        if (iteration !== undefined) {
            targetRuntime.setCurrentIteration(iteration);
        }
        // Optionally start cycle tracking before firing the inject node.
        let cycleId;
        if (trackCompletion) {
            cycleId = targetRuntime.startFlowCycle();
        }
        const effectivePayload = payload || {};
        await targetRuntime.executeNode(nodeId, effectivePayload);
        res.json({ success: true, cycleId });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] Inject error:`, error);
        const message = error instanceof Error ? error.message : "Unknown error";
        res.status(500).json({ success: false, error: message });
    }
});
|
|
530
|
+
/**
 * GET /status
 * Get runtime status.
 *
 * With ?flowId=... returns that runtime's own status object (404 when the
 * flow is unknown); without it, returns a global summary of all flows and
 * the total SSE connection count.
 */
app.get("/status", async (req, res) => {
    const flowId = req.query.flowId;
    if (!flowId) {
        // Global status: aggregate across every registered flow.
        const totalSse = Array.from(sseConnections.values()).reduce((sum, set) => sum + set.size, 0);
        return res.json({
            totalFlows: runtimes.size,
            flows: Array.from(runtimes.keys()),
            sseConnections: totalSse,
        });
    }
    const runtime = runtimes.get(flowId);
    if (!runtime) {
        return res.status(404).json({ error: "Flow not found" });
    }
    try {
        res.json(await runtime.getStatus());
    }
    catch (error) {
        res.status(500).json({ error: "Failed to get status" });
    }
});
|
|
558
|
+
/**
 * GET /flows
 * List all active flows with detailed information (for admin panel).
 *
 * For each registered runtime, merges deploy-time metadata (from
 * deployedFlows) with the runtime's live status; flows whose status call
 * fails still appear with zeroed status fields. Sorted newest-first.
 */
app.get("/flows", async (req, res) => {
    logger_1.default.info(`[Runtime] GET /flows - listing ${runtimes.size} active flows`);
    try {
        const flows = [];
        for (const [flowId, runtime] of runtimes.entries()) {
            const deployInfo = deployedFlows.get(flowId);
            const sseCount = sseConnections.get(flowId)?.size || 0;
            // Best-effort status fetch: a failing runtime should not hide the
            // flow from the admin listing.
            let status = {};
            try {
                status = await runtime.getStatus();
            }
            catch (e) {
                // Ignore status errors
            }
            // Calculate running time; if deploy metadata is missing (e.g. a
            // runtime created outside /deploy), fall back to "now" => 0ms.
            const deployedAt = deployInfo?.deployedAt || Date.now();
            const runningTimeMs = Date.now() - deployedAt;
            flows.push({
                flowId,
                tenantId: deployInfo?.tenantId || flowId,
                deployedAt,
                runningTimeMs,
                runningTimeFormatted: formatDuration(runningTimeMs),
                nodeCount: deployInfo?.flowData?.nodes?.length || 0,
                edgeCount: deployInfo?.flowData?.edges?.length || 0,
                sseConnections: sseCount,
                status: {
                    uptime: status.uptime || 0,
                    nodesTotal: status.nodes?.total || 0,
                    nodesRunning: status.nodes?.running || 0,
                },
                // Additional metadata for admin display
                modelName: deployInfo?.modelName || null,
                scenarioName: deployInfo?.scenarioName || null,
                userId: deployInfo?.userId || null,
                modelId: deployInfo?.modelId || null,
            });
        }
        // Sort by deployedAt descending (newest first)
        flows.sort((a, b) => b.deployedAt - a.deployedAt);
        res.json({
            success: true,
            totalFlows: flows.length,
            flows,
            timestamp: Date.now(),
        });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] GET /flows error:`, error);
        res.status(500).json({
            success: false,
            error: error instanceof Error ? error.message : "Unknown error",
        });
    }
});
|
|
617
|
+
/**
 * POST /flows/:flowId/stop
 * Stop a specific flow (admin action).
 *
 * Same teardown as POST /stop, but additionally notifies and closes all
 * SSE connections for the flow with a final `flow-stopped` event.
 */
app.post("/flows/:flowId/stop", async (req, res) => {
    const { flowId } = req.params;
    logger_1.default.info(`[Runtime] POST /flows/${flowId}/stop (admin action)`);
    try {
        const runtime = runtimes.get(flowId);
        if (!runtime) {
            return res.status(404).json({ success: false, error: "Flow not found" });
        }
        const result = await runtime.stop(flowId);
        // Remove from persistence
        deployedFlows.delete(flowId);
        (0, persistence_1.removePersistedFlow)(flowId);
        // Cleanup runtime after stop
        runtimes.delete(flowId);
        // Close SSE connections for this flow: tell each client the flow is
        // gone, then end the stream so browsers stop auto-reconnect churn.
        const connections = sseConnections.get(flowId);
        if (connections) {
            connections.forEach((conn) => {
                try {
                    conn.write(`event: flow-stopped\ndata: ${JSON.stringify({ flowId, timestamp: Date.now() })}\n\n`);
                    conn.end();
                }
                catch (e) {
                    // Ignore connection errors
                }
            });
            sseConnections.delete(flowId);
        }
        res.json({
            success: result.success,
            flowId,
            timestamp: Date.now(),
        });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] Stop flow error:`, error);
        res.status(500).json({
            success: false,
            error: error instanceof Error ? error.message : "Unknown error",
        });
    }
});
|
|
663
|
+
// ============================================================================
|
|
664
|
+
// SSE ENDPOINT
|
|
665
|
+
// ============================================================================
|
|
666
|
+
/**
 * GET /events
 * SSE endpoint for real-time runtime events.
 *
 * Query: ?flowId=... (required). Registers the response object in
 * sseConnections so runtime event forwarding and batch endpoints can
 * broadcast to it. Sends an initial `connected` event, then a `heartbeat`
 * every 10s carrying the flow's running state and basic metrics. The
 * connection is cleaned up on client disconnect.
 */
app.get("/events", (req, res) => {
    const flowId = req.query.flowId;
    if (!flowId) {
        return res
            .status(400)
            .json({ error: "flowId query parameter is required" });
    }
    logger_1.default.info(`[Runtime] SSE connection for flowId=${flowId}`);
    // Setup SSE headers
    res.setHeader("Content-Type", "text/event-stream");
    res.setHeader("Cache-Control", "no-cache");
    res.setHeader("Connection", "keep-alive");
    // Disable nginx proxy buffering so events are delivered immediately.
    res.setHeader("X-Accel-Buffering", "no");
    // Send retry delay instruction (3 seconds for reconnection)
    res.write("retry: 3000\n\n");
    // Check if flow is running
    const runtime = runtimes.get(flowId);
    const isRunning = runtime ? true : false;
    // Send initial connection event with flow status
    const connectionEvent = JSON.stringify({
        type: "connected",
        flowId,
        timestamp: Date.now(),
        flowRunning: isRunning,
    });
    res.write(`event: connected\ndata: ${connectionEvent}\n\n`);
    // Add to SSE connections
    if (!sseConnections.has(flowId)) {
        sseConnections.set(flowId, new Set());
    }
    sseConnections.get(flowId).add(res);
    // Heartbeat every 10s with flow status; also serves as a liveness probe —
    // if writing fails the interval is cancelled in the catch below.
    const heartbeat = setInterval(async () => {
        try {
            // Re-resolve the runtime each tick: the flow may have been
            // stopped/redeployed since the connection opened.
            const rt = runtimes.get(flowId);
            const running = rt ? true : false;
            let metrics = {};
            if (rt) {
                try {
                    const status = await rt.getStatus();
                    metrics = {
                        uptime: status.uptime,
                        nodes: status.nodes?.total || 0,
                    };
                }
                catch (e) {
                    // Ignore errors getting status
                }
            }
            const heartbeatEvent = JSON.stringify({
                type: "heartbeat",
                flowId,
                timestamp: Date.now(),
                flowRunning: running,
                metrics,
            });
            res.write(`event: heartbeat\ndata: ${heartbeatEvent}\n\n`);
        }
        catch (error) {
            // Write failed (client gone): stop the heartbeat timer.
            clearInterval(heartbeat);
        }
    }, 10000);
    // Cleanup on disconnect
    req.on("close", () => {
        logger_1.default.info(`[Runtime] SSE disconnected flowId=${flowId}`);
        clearInterval(heartbeat);
        sseConnections.get(flowId)?.delete(res);
    });
});
|
|
739
|
+
// ============================================================================
|
|
740
|
+
// CONTEXT MANAGEMENT, INSTANTIATION & HEALTH CHECK
|
|
741
|
+
// ============================================================================
|
|
742
|
+
/**
 * POST /context/clear
 * Clear flow context for a specific flow (for self-referential variables).
 *
 * Body: { flowId: string }
 * Responds with the list of context keys that were cleared; the list is
 * empty when the flow is not currently deployed (nothing to clear).
 */
app.post("/context/clear", async (req, res) => {
    const { flowId } = req.body;
    if (!flowId) {
        return res
            .status(400)
            .json({ success: false, error: "flowId is required" });
    }
    logger_1.default.info(`[Runtime] POST /context/clear flowId=${flowId}`);
    try {
        const runtime = runtimes.get(flowId);
        // Delegate to the runtime's clearFlowContext method when deployed;
        // an undeployed flow has no context, so there is nothing to clear.
        const clearedKeys = runtime ? runtime.clearFlowContext() : [];
        if (!runtime) {
            logger_1.default.debug(`[Runtime] Flow ${flowId} not deployed, nothing to clear`);
        }
        res.json({
            success: true,
            data: { flowId, clearedKeys },
        });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] Context clear error:`, error);
        res.status(500).json({
            success: false,
            error: error instanceof Error ? error.message : "Failed to clear context",
        });
    }
});
|
|
781
|
+
/**
 * POST /setInstantiation
 * Set the current instantiation number for a flow (used by multi-cycle
 * controllers).
 *
 * Body: { flowId: string, instantiation, version }
 * Returns 404 when the flow is not deployed; otherwise stores the values
 * in the flow context and echoes them back.
 */
app.post("/setInstantiation", async (req, res) => {
    const { flowId, instantiation, version } = req.body;
    if (!flowId) {
        return res
            .status(400)
            .json({ success: false, error: "flowId is required" });
    }
    logger_1.default.info(`[Runtime] POST /setInstantiation flowId=${flowId} instantiation=${instantiation} version=${version}`);
    try {
        const runtime = runtimes.get(flowId);
        // Guard clause: cannot track instantiation for an undeployed flow.
        if (!runtime) {
            logger_1.default.debug(`[Runtime] Flow ${flowId} not deployed, cannot set instantiation`);
            return res
                .status(404)
                .json({ success: false, error: "Flow not deployed" });
        }
        // Store instantiation number in runtime context for tracking
        runtime.setFlowContextValue("_currentInstantiation", instantiation);
        runtime.setFlowContextValue("_controllerVersion", version);
        res.json({
            success: true,
            data: { flowId, instantiation, version },
        });
    }
    catch (error) {
        logger_1.default.error(`[Runtime] SetInstantiation error:`, error);
        res.status(500).json({
            success: false,
            error: error instanceof Error ? error.message : "Failed to set instantiation",
        });
    }
});
|
|
823
|
+
/**
 * GET /health
 * Health check endpoint.
 * Reports process uptime/memory, the number of deployed flows, and the
 * total number of open SSE client connections across all flows.
 */
app.get("/health", (req, res) => {
    // Sum client counts across every per-flow connection Set.
    let totalSseClients = 0;
    for (const clients of sseConnections.values()) {
        totalSseClients += clients.size;
    }
    res.json({
        status: "healthy",
        uptime: process.uptime(),
        flows: runtimes.size,
        sseConnections: totalSseClients,
        memory: process.memoryUsage(),
        timestamp: Date.now(),
    });
});
|
|
837
|
+
// ============================================================================
|
|
838
|
+
// HELPER FUNCTIONS
|
|
839
|
+
// ============================================================================
|
|
840
|
+
/**
 * Setup event forwarding from runtime to SSE connections.
 * Each forwarded event is augmented with the runtime's current iteration
 * and execution context so clients can tag/filter messages properly.
 */
function setupEventForwarding(runtime, flowId) {
    runtime.on("runtime-event", (event) => {
        const subscribers = sseConnections.get(flowId);
        // Debug: log coupling-data and resonance-data events
        if (event.type === "coupling-data" || event.type === "resonance-data") {
            logger_1.default.debug(`[SSE Forward] ${event.type} event for ${flowId}: connections=${subscribers?.size || 0}`);
        }
        // No listeners for this flow - nothing to do.
        if (!subscribers || subscribers.size === 0) {
            return;
        }
        // Attach iteration and execution context for client-side filtering.
        const payload = JSON.stringify({
            ...event,
            iteration: runtime.getCurrentIteration(),
            executionContext: runtime.getExecutionContext(),
        });
        const message = `event: ${event.type}\ndata: ${payload}\n\n`;
        for (const res of subscribers) {
            try {
                res.write(message);
            }
            catch (error) {
                // Connection closed, will be cleaned up on 'close' event
            }
        }
    });
}
|
|
870
|
+
/**
 * Build a Node-RED style flow JSON document from React Flow flowData.
 *
 * Converts React Flow edges into the Node-RED `wires` format
 * ([[output0-targets], [output1-targets], ...]) and flattens each node's
 * `data` into the node object. Returns an empty flow when flowData (or
 * its node list) is missing.
 */
function buildFlowJSON(flowId, flowData) {
    if (!flowData || !flowData.nodes) {
        return { id: flowId, name: `Flow ${flowId}`, nodes: [], edges: [] };
    }
    // Group edge targets by source node id, then by output-port index.
    // sourceHandle may be "0", "1", "output-0", "output-1", etc. or
    // undefined (which defaults to port 0).
    const wiringBySource = new Map();
    for (const edge of flowData.edges || []) {
        let portIndex = 0;
        if (edge.sourceHandle) {
            // Extract the number from sourceHandle ("0" or "output-0" both work)
            const digits = edge.sourceHandle.match(/(\d+)/);
            portIndex = digits ? parseInt(digits[1], 10) : 0;
        }
        let portMap = wiringBySource.get(edge.source);
        if (!portMap) {
            portMap = new Map();
            wiringBySource.set(edge.source, portMap);
        }
        let targets = portMap.get(portIndex);
        if (!targets) {
            targets = [];
            portMap.set(portIndex, targets);
        }
        targets.push(edge.target);
    }
    const nodes = flowData.nodes.map((node) => {
        const portMap = wiringBySource.get(node.id);
        let wires = [];
        if (portMap && portMap.size > 0) {
            // Size the wires array to the highest used port index.
            const maxPort = Math.max(...portMap.keys());
            wires = Array(maxPort + 1)
                .fill(null)
                .map(() => []);
            portMap.forEach((targets, portIndex) => {
                // Deduplicate targets: edges with non-numeric handle names
                // (e.g. "output-success", "output-error") all collapse onto
                // port 0 and would otherwise send duplicate messages.
                wires[portIndex] = [...new Set(targets)];
            });
        }
        else if (node.data?.wires) {
            wires = node.data.wires;
        }
        // Separate known fields from extra node-level properties.
        const { id, type, position, data, ...nodeExtras } = node;
        // Debug nodes: map outputType onto Node-RED's `complete` property.
        //   "payload"  -> false           (show payload only)
        //   "complete" -> true            (show full message)
        //   "property" -> <property path> (show a specific property)
        let debugComplete = undefined;
        if (type === "debug") {
            const outputType = data?.outputType || "payload";
            if (outputType === "complete") {
                debugComplete = true;
            }
            else if (outputType === "property") {
                debugComplete = data?.property || "payload";
            }
            else {
                debugComplete = false; // Default: show payload only
            }
            // DEBUG: Log the mapping
            logger_1.default.debug(`[buildFlowJSON] Debug node ${id}: outputType=${outputType}, property=${data?.property}, mapped complete=${debugComplete}, original complete=${data?.complete}`);
        }
        return {
            id,
            type,
            name: data?.name || data?.label || id,
            x: position?.x || 0,
            y: position?.y || 0,
            z: flowId,
            // Node-level extras (e.g. property, rules for switch nodes)
            ...nodeExtras,
            // Per-node configuration from data
            ...data,
            // For debug nodes, override complete with the mapped value
            ...(type === "debug" && debugComplete !== undefined
                ? { complete: debugComplete }
                : {}),
            // wires MUST come last so edge-derived wiring takes priority
            wires,
        };
    });
    return {
        id: flowId,
        name: flowData.name || `Flow ${flowId}`,
        nodes,
        edges: flowData.edges || [],
    };
}
|
|
965
|
+
// ============================================================================
|
|
966
|
+
// RESTORE PERSISTED FLOWS ON STARTUP
|
|
967
|
+
// ============================================================================
|
|
968
|
+
/**
 * Restore flows that were persisted to disk by a previous run.
 * For each persisted flow: create a ServerFlowRuntime, wire up SSE event
 * forwarding, deploy the rebuilt flow JSON, and record it in deployedFlows.
 * A failure restoring one flow is logged and does not block the others.
 */
async function restorePersistedFlows() {
    const state = (0, persistence_1.loadPersistedState)();
    if (!state || state.flows.length === 0) {
        logger_1.default.info("[Startup] No flows to restore");
        return;
    }
    logger_1.default.info(`[Startup] Restoring ${state.flows.length} persisted flows...`);
    for (const persistedFlow of state.flows) {
        try {
            const { flowId, flowData, deployedAt, tenantId } = persistedFlow;
            logger_1.default.info(`[Startup] Restoring flow: ${flowId}`);
            // Create runtime and forward its events to any SSE subscribers
            const runtime = new ServerFlowRuntime_1.ServerFlowRuntime(flowId, tenantId || flowId);
            runtimes.set(flowId, runtime);
            setupEventForwarding(runtime, flowId);
            // Rebuild and deploy the flow
            await runtime.deploy([buildFlowJSON(flowId, flowData)]);
            // Track in memory
            deployedFlows.set(flowId, { flowData, deployedAt, tenantId });
            logger_1.default.info(`[Startup] ✅ Flow ${flowId} restored successfully`);
        }
        catch (error) {
            logger_1.default.error(`[Startup] ❌ Failed to restore flow ${persistedFlow.flowId}:`, error);
        }
    }
    logger_1.default.info(`[Startup] Restoration complete: ${runtimes.size} flows active`);
}
|
|
996
|
+
/**
 * Start the HTTP server.
 *
 * @param {{port?: number, host?: string, silent?: boolean}} [options]
 *   port   - defaults to $PORT or 3001
 *   host   - defaults to "0.0.0.0"
 *   silent - suppress the startup banner
 * @returns {Promise} resolves with the http.Server once listening and
 *   persisted flows have been restored; rejects on listen errors.
 */
function createServer(options = {}) {
    const port = options.port || parseInt(process.env.PORT || "3001", 10);
    const host = options.host || "0.0.0.0";
    const silent = options.silent || false;
    return new Promise((resolve, reject) => {
        const server = app.listen(port, host, async () => {
            // CRITICAL: Increase server timeouts for long-running batch operations.
            // Node.js HTTP server defaults: headersTimeout=60s, requestTimeout=300s, keepAliveTimeout=5s.
            // For batch-run with 10,000+ iterations, operations can take 20+ minutes.
            // Without this, the server may close the connection before the batch completes.
            const SIXTY_MINUTES_MS = 60 * 60 * 1000;
            server.headersTimeout = SIXTY_MINUTES_MS;
            server.requestTimeout = SIXTY_MINUTES_MS;
            server.keepAliveTimeout = SIXTY_MINUTES_MS;
            if (!silent) {
                logger_1.default.always(`
╔══════════════════════════════════════════════════════════════╗
║ FlowRed Runtime Service ║
╠══════════════════════════════════════════════════════════════╣
║ REST API + SSE Events ║
║ Port: ${port} ║
║ Host: ${host} ║
║ Log Level: ${process.env.LOG_LEVEL || "info"} ║
╠══════════════════════════════════════════════════════════════╣
║ Endpoints: ║
║ • POST /deploy - Deploy a flow ║
║ • POST /stop - Stop a flow ║
║ • POST /inject - Execute inject node ║
║ • POST /context/clear - Clear flow context ║
║ • GET /status - Runtime status ║
║ • GET /events - SSE events (flowId required) ║
║ • GET /health - Health check ║
╚══════════════════════════════════════════════════════════════╝
`);
            }
            // Restore persisted flows after server starts
            await restorePersistedFlows();
            resolve(server);
        });
        server.on("error", (error) => {
            reject(error);
        });
    });
}
|
|
1039
|
+
// ============================================================================
|
|
1040
|
+
// AUTO-START (when run directly, not imported)
|
|
1041
|
+
// ============================================================================
|
|
1042
|
+
// Only auto-start if this file is run directly (not imported as module)
|
|
1043
|
+
// Auto-start only when this file is executed directly (`node server.js`),
// not when it is imported as a module by another process.
const runDirectly = require.main === module;
if (runDirectly) {
    createServer().catch((error) => {
        console.error("Failed to start server:", error.message);
        process.exit(1);
    });
}
|
|
1050
|
+
//# sourceMappingURL=server.js.map
|