@orbytautomation/engine 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/AdapterRegistry.d.ts.map +1 -1
- package/dist/adapters/AdapterRegistry.js +6 -0
- package/dist/adapters/AdapterRegistry.js.map +1 -1
- package/dist/context/VariableResolver.d.ts.map +1 -1
- package/dist/context/VariableResolver.js +7 -0
- package/dist/context/VariableResolver.js.map +1 -1
- package/dist/core/OrbytEngine.d.ts +19 -0
- package/dist/core/OrbytEngine.d.ts.map +1 -1
- package/dist/core/OrbytEngine.js +79 -3
- package/dist/core/OrbytEngine.js.map +1 -1
- package/dist/core/index.d.ts +1 -1
- package/dist/core/index.d.ts.map +1 -1
- package/dist/core/index.js +1 -1
- package/dist/core/index.js.map +1 -1
- package/dist/errors/ErrorDebugger.d.ts.map +1 -1
- package/dist/errors/ErrorDebugger.js +7 -0
- package/dist/errors/ErrorDebugger.js.map +1 -1
- package/dist/errors/ErrorDetector.d.ts.map +1 -1
- package/dist/errors/ErrorDetector.js +12 -0
- package/dist/errors/ErrorDetector.js.map +1 -1
- package/dist/errors/ErrorFormatter.d.ts +1 -1
- package/dist/errors/ErrorFormatter.d.ts.map +1 -1
- package/dist/execution/ExecutionEngine.d.ts.map +1 -1
- package/dist/execution/ExecutionEngine.js +36 -6
- package/dist/execution/ExecutionEngine.js.map +1 -1
- package/dist/execution/ExecutionPlan.d.ts.map +1 -1
- package/dist/execution/ExecutionPlan.js +21 -1
- package/dist/execution/ExecutionPlan.js.map +1 -1
- package/dist/execution/IntentAnalyzer.d.ts.map +1 -1
- package/dist/execution/IntentAnalyzer.js +20 -0
- package/dist/execution/IntentAnalyzer.js.map +1 -1
- package/dist/execution/StepExecutor.d.ts.map +1 -1
- package/dist/execution/StepExecutor.js +109 -29
- package/dist/execution/StepExecutor.js.map +1 -1
- package/dist/execution/WorkflowExecutor.d.ts.map +1 -1
- package/dist/execution/WorkflowExecutor.js +31 -0
- package/dist/execution/WorkflowExecutor.js.map +1 -1
- package/dist/explanation/ExplanationGenerator.d.ts +105 -0
- package/dist/explanation/ExplanationGenerator.d.ts.map +1 -0
- package/dist/explanation/ExplanationGenerator.js +814 -0
- package/dist/explanation/ExplanationGenerator.js.map +1 -0
- package/dist/explanation/ExplanationLogger.d.ts +50 -0
- package/dist/explanation/ExplanationLogger.d.ts.map +1 -0
- package/dist/explanation/ExplanationLogger.js +284 -0
- package/dist/explanation/ExplanationLogger.js.map +1 -0
- package/dist/explanation/ExplanationTypes.d.ts +252 -0
- package/dist/explanation/ExplanationTypes.d.ts.map +1 -0
- package/dist/explanation/ExplanationTypes.js +10 -0
- package/dist/explanation/ExplanationTypes.js.map +1 -0
- package/dist/explanation/index.d.ts +12 -0
- package/dist/explanation/index.d.ts.map +1 -0
- package/dist/explanation/index.js +11 -0
- package/dist/explanation/index.js.map +1 -0
- package/dist/hooks/HookManager.d.ts.map +1 -1
- package/dist/hooks/HookManager.js +8 -0
- package/dist/hooks/HookManager.js.map +1 -1
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -1
- package/dist/lifecycle/ShutdownManager.d.ts.map +1 -1
- package/dist/lifecycle/ShutdownManager.js +22 -9
- package/dist/lifecycle/ShutdownManager.js.map +1 -1
- package/dist/lifecycle/StartupManager.d.ts.map +1 -1
- package/dist/lifecycle/StartupManager.js +16 -5
- package/dist/lifecycle/StartupManager.js.map +1 -1
- package/dist/loader/WorkflowLoader.d.ts +1 -1
- package/dist/loader/WorkflowLoader.d.ts.map +1 -1
- package/dist/logging/EngineLogger.d.ts +492 -0
- package/dist/logging/EngineLogger.d.ts.map +1 -0
- package/dist/logging/EngineLogger.js +1170 -0
- package/dist/logging/EngineLogger.js.map +1 -0
- package/dist/logging/LoggerManager.d.ts +49 -0
- package/dist/logging/LoggerManager.d.ts.map +1 -0
- package/dist/logging/LoggerManager.js +94 -0
- package/dist/logging/LoggerManager.js.map +1 -0
- package/dist/logging/index.d.ts +2 -1
- package/dist/logging/index.d.ts.map +1 -1
- package/dist/logging/index.js +2 -3
- package/dist/logging/index.js.map +1 -1
- package/dist/parser/SchemaValidator.d.ts.map +1 -1
- package/dist/parser/SchemaValidator.js +6 -0
- package/dist/parser/SchemaValidator.js.map +1 -1
- package/dist/parser/StepParser.d.ts +6 -1
- package/dist/parser/StepParser.d.ts.map +1 -1
- package/dist/parser/StepParser.js +14 -1
- package/dist/parser/StepParser.js.map +1 -1
- package/dist/parser/WorkflowParser.d.ts.map +1 -1
- package/dist/parser/WorkflowParser.js +53 -28
- package/dist/parser/WorkflowParser.js.map +1 -1
- package/dist/scheduling/ScheduleParser.d.ts.map +1 -1
- package/dist/scheduling/ScheduleParser.js +7 -0
- package/dist/scheduling/ScheduleParser.js.map +1 -1
- package/dist/scheduling/Scheduler.d.ts.map +1 -1
- package/dist/scheduling/Scheduler.js +13 -0
- package/dist/scheduling/Scheduler.js.map +1 -1
- package/dist/types/log-types.d.ts +105 -0
- package/dist/types/log-types.d.ts.map +1 -0
- package/dist/types/log-types.js +42 -0
- package/dist/types/log-types.js.map +1 -0
- package/package.json +1 -1
- package/dist/core/EngineLogger.d.ts +0 -182
- package/dist/core/EngineLogger.d.ts.map +0 -1
- package/dist/core/EngineLogger.js +0 -293
- package/dist/core/EngineLogger.js.map +0 -1
- package/dist/logging/LogLevel.d.ts +0 -2
- package/dist/logging/LogLevel.d.ts.map +0 -1
- package/dist/logging/LogLevel.js +0 -2
- package/dist/logging/LogLevel.js.map +0 -1
- package/dist/logging/Logger.d.ts +0 -2
- package/dist/logging/Logger.d.ts.map +0 -1
- package/dist/logging/Logger.js +0 -2
- package/dist/logging/Logger.js.map +0 -1
|
@@ -0,0 +1,814 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Explanation Generator
|
|
3
|
+
*
|
|
4
|
+
* Generates comprehensive execution explanations for workflows.
|
|
5
|
+
* This module is ALWAYS invoked before workflow execution to provide
|
|
6
|
+
* full transparency into what the engine will do.
|
|
7
|
+
*
|
|
8
|
+
* @module explanation
|
|
9
|
+
*/
|
|
10
|
+
/**
|
|
11
|
+
* Explanation Generator
|
|
12
|
+
*
|
|
13
|
+
* Always generates explanations before workflow execution.
|
|
14
|
+
* Integrates with EngineLogger to provide full transparency.
|
|
15
|
+
*/
|
|
16
|
+
export class ExplanationGenerator {
|
|
17
|
+
/**
 * Generate a complete execution explanation
 *
 * This method analyzes the workflow and generates a comprehensive
 * explanation of what will happen during execution. This is always
 * called before workflow execution begins.
 *
 * @param workflow - Parsed workflow to explain
 * @returns Complete execution explanation
 */
static generate(workflow) {
    // Determine execution strategy.
    // A step with a non-empty `needs` list participates in the dependency graph.
    const hasParallelSteps = workflow.steps.some(step => step.needs && step.needs.length > 0);
    // "All sequential" = every step either has no deps or depends only on the
    // immediately preceding step (a simple chain).
    const allSequential = workflow.steps.every((step, idx) => !step.needs || step.needs.length === 0 || (idx > 0 && step.needs.includes(workflow.steps[idx - 1].id)));
    const executionStrategy = !hasParallelSteps ? 'sequential' : allSequential ? 'sequential' : 'mixed';
    // Build dependency graph: stepId -> list of step ids it depends on.
    const dependencyGraph = {};
    for (const step of workflow.steps) {
        dependencyGraph[step.id] = step.needs || [];
    }
    // Detect cycles
    const { hasCycles, cycles } = this.detectCycles(workflow.steps);
    // Map steps to explained steps
    const explainedSteps = workflow.steps.map(step => this.buildExplainedStep(step, workflow));
    // Extract unique adapters used and their actions.
    // Action strings look like "adapter.action[.subaction]" (split on '.').
    const adapterActionsMap = new Map();
    for (const step of workflow.steps) {
        const parts = step.action.split('.');
        const adapter = parts[0];
        const action = parts.slice(1).join('.');
        if (!adapterActionsMap.has(adapter)) {
            adapterActionsMap.set(adapter, new Set());
        }
        adapterActionsMap.get(adapter).add(action);
    }
    const adaptersUsed = Array.from(adapterActionsMap.keys()).sort();
    const adapterActions = {};
    for (const [adapter, actions] of adapterActionsMap) {
        adapterActions[adapter] = Array.from(actions).sort();
    }
    // Analyze workflow inputs: split into required inputs and optional
    // inputs (those carrying a default value).
    const requiredInputs = [];
    const optionalInputs = {};
    if (workflow.inputs) {
        for (const [key, value] of Object.entries(workflow.inputs)) {
            if (key.startsWith('_'))
                continue; // Skip internal fields
            // Check if input has 'required' field
            if (typeof value === 'object' && value !== null) {
                if (value.required === true) {
                    requiredInputs.push(key);
                }
                else if (value.default !== undefined) {
                    optionalInputs[key] = value.default;
                }
            }
            // NOTE(review): scalar input declarations (non-object values) are
            // classified as neither required nor optional here — confirm intended.
        }
    }
    // Calculate workflow complexity.
    // NOTE(review): `parallelizableSteps` counts steps that HAVE dependencies;
    // steps without dependencies are arguably the parallelizable ones — verify
    // the intended semantics of this metric.
    const maxDepth = this.calculateMaxDepth(workflow.steps, dependencyGraph);
    const parallelizableSteps = workflow.steps.filter(step => step.needs && step.needs.length > 0).length;
    const sequentialSteps = workflow.steps.length - parallelizableSteps;
    // Build explanation object (core, always-present fields).
    const explanation = {
        workflowName: workflow.metadata?.name || workflow.name,
        description: workflow.metadata?.description || workflow.description,
        version: workflow.version,
        kind: workflow.kind,
        stepCount: workflow.steps.length,
        executionStrategy,
        steps: explainedSteps,
        hasCycles,
        cycles: hasCycles ? cycles : undefined,
        dependencyGraph,
        adaptersUsed,
        adapterActions,
        complexity: {
            totalSteps: workflow.steps.length,
            maxDepth,
            parallelizableSteps,
            sequentialSteps,
        },
    };
    // Add optional workflow-level fields, filtering out internal ('_'-prefixed) keys.
    if (workflow.inputs) {
        explanation.inputs = this.filterInternalFields(workflow.inputs);
        if (requiredInputs.length > 0) {
            explanation.requiredInputs = requiredInputs;
        }
        if (Object.keys(optionalInputs).length > 0) {
            explanation.optionalInputs = optionalInputs;
        }
    }
    if (workflow.secrets) {
        // Only show secret keys, never values
        explanation.secrets = {
            vault: workflow.secrets.vault,
            keys: workflow.secrets.refs ? Object.keys(workflow.secrets.refs) : undefined,
        };
    }
    if (workflow.context) {
        explanation.context = this.filterInternalFields(workflow.context);
    }
    if (workflow.outputs) {
        explanation.outputs = this.filterInternalFields(workflow.outputs);
    }
    if (workflow.defaults) {
        explanation.defaults = {
            timeout: workflow.defaults.timeout,
            adapter: workflow.defaults.adapter,
        };
    }
    if (workflow.policies) {
        explanation.policies = workflow.policies;
    }
    // Metadata-level fields win over top-level ones when both exist.
    if (workflow.metadata?.tags || workflow.tags) {
        explanation.tags = workflow.metadata?.tags || workflow.tags;
    }
    if (workflow.metadata?.owner || workflow.owner) {
        explanation.owner = workflow.metadata?.owner || workflow.owner;
    }
    if (workflow.annotations) {
        explanation.annotations = this.filterInternalFields(workflow.annotations);
    }
    // ============================================================================
    // HIGH PRIORITY DYNAMIC IMPROVEMENTS
    // ============================================================================
    // 1. Runtime Data Prediction
    explanation.dataFlow = this.analyzeDataFlow(workflow, dependencyGraph);
    // 2. Conditional Path Analysis
    explanation.conditionalPaths = this.analyzeConditionalPaths(workflow, dependencyGraph);
    // 3. Execution Time Estimation
    explanation.timeEstimation = this.estimateExecutionTime(workflow, dependencyGraph);
    return explanation;
}
|
|
152
|
+
/**
|
|
153
|
+
* Build an explained step from a parsed step
|
|
154
|
+
*
|
|
155
|
+
* @param step - Parsed step
|
|
156
|
+
* @param _workflow - Full workflow (reserved for future cross-step reference analysis)
|
|
157
|
+
* @returns Explained step
|
|
158
|
+
*/
|
|
159
|
+
static buildExplainedStep(step, _workflow) {
|
|
160
|
+
const explained = {
|
|
161
|
+
id: step.id,
|
|
162
|
+
name: step.name,
|
|
163
|
+
uses: step.action,
|
|
164
|
+
needs: step.needs || [],
|
|
165
|
+
adapter: step.adapter
|
|
166
|
+
};
|
|
167
|
+
// Add optional fields
|
|
168
|
+
if (step.when) {
|
|
169
|
+
explained.when = step.when;
|
|
170
|
+
}
|
|
171
|
+
if (step.timeout) {
|
|
172
|
+
explained.timeout = step.timeout;
|
|
173
|
+
}
|
|
174
|
+
if (step.retry) {
|
|
175
|
+
explained.retry = {
|
|
176
|
+
max: step.retry.max, // Configuration: max retries allowed
|
|
177
|
+
backoff: step.retry.backoff,
|
|
178
|
+
delay: step.retry.delay,
|
|
179
|
+
};
|
|
180
|
+
// Note: 'count' is runtime state, not shown in explanation
|
|
181
|
+
}
|
|
182
|
+
if (step.continueOnError !== undefined) {
|
|
183
|
+
explained.continueOnError = step.continueOnError;
|
|
184
|
+
}
|
|
185
|
+
if (step.input) {
|
|
186
|
+
explained.with = step.input;
|
|
187
|
+
// Analyze input to find workflow input references
|
|
188
|
+
const inputsReferenced = this.extractVariableReferences(step.input, 'inputs');
|
|
189
|
+
if (inputsReferenced.length > 0) {
|
|
190
|
+
explained.inputsReferenced = inputsReferenced;
|
|
191
|
+
}
|
|
192
|
+
}
|
|
193
|
+
if (step.env) {
|
|
194
|
+
explained.env = step.env;
|
|
195
|
+
// Analyze env to find secrets
|
|
196
|
+
const secretsUsed = this.extractVariableReferences(step.env, 'secrets');
|
|
197
|
+
if (secretsUsed.length > 0) {
|
|
198
|
+
explained.secretsUsed = secretsUsed;
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
if (step.outputs) {
|
|
202
|
+
explained.outputs = step.outputs;
|
|
203
|
+
}
|
|
204
|
+
return explained;
|
|
205
|
+
}
|
|
206
|
+
/**
|
|
207
|
+
* Detect circular dependencies in workflow steps
|
|
208
|
+
*/
|
|
209
|
+
static detectCycles(steps) {
|
|
210
|
+
const graph = new Map();
|
|
211
|
+
// Build adjacency list
|
|
212
|
+
for (const step of steps) {
|
|
213
|
+
graph.set(step.id, step.needs || []);
|
|
214
|
+
}
|
|
215
|
+
const visited = new Set();
|
|
216
|
+
const recursionStack = new Set();
|
|
217
|
+
const cycles = [];
|
|
218
|
+
const currentPath = [];
|
|
219
|
+
function dfs(nodeId) {
|
|
220
|
+
if (!graph.has(nodeId))
|
|
221
|
+
return false;
|
|
222
|
+
visited.add(nodeId);
|
|
223
|
+
recursionStack.add(nodeId);
|
|
224
|
+
currentPath.push(nodeId);
|
|
225
|
+
const neighbors = graph.get(nodeId) || [];
|
|
226
|
+
for (const neighbor of neighbors) {
|
|
227
|
+
if (!visited.has(neighbor)) {
|
|
228
|
+
if (dfs(neighbor)) {
|
|
229
|
+
return true;
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
else if (recursionStack.has(neighbor)) {
|
|
233
|
+
// Found a cycle - extract it from current path
|
|
234
|
+
const cycleStart = currentPath.indexOf(neighbor);
|
|
235
|
+
if (cycleStart !== -1) {
|
|
236
|
+
cycles.push([...currentPath.slice(cycleStart), neighbor]);
|
|
237
|
+
}
|
|
238
|
+
return true;
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
recursionStack.delete(nodeId);
|
|
242
|
+
currentPath.pop();
|
|
243
|
+
return false;
|
|
244
|
+
}
|
|
245
|
+
// Check each node
|
|
246
|
+
for (const [nodeId] of graph) {
|
|
247
|
+
if (!visited.has(nodeId)) {
|
|
248
|
+
dfs(nodeId);
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
return {
|
|
252
|
+
hasCycles: cycles.length > 0,
|
|
253
|
+
cycles,
|
|
254
|
+
};
|
|
255
|
+
}
|
|
256
|
+
/**
|
|
257
|
+
* Calculate maximum dependency depth in the workflow
|
|
258
|
+
*/
|
|
259
|
+
static calculateMaxDepth(steps, dependencyGraph) {
|
|
260
|
+
const depths = new Map();
|
|
261
|
+
// Calculate depth for each step recursively
|
|
262
|
+
function getDepth(stepId, visited = new Set()) {
|
|
263
|
+
// Return cached depth if already calculated
|
|
264
|
+
if (depths.has(stepId)) {
|
|
265
|
+
return depths.get(stepId);
|
|
266
|
+
}
|
|
267
|
+
// Detect cycles
|
|
268
|
+
if (visited.has(stepId)) {
|
|
269
|
+
return 0;
|
|
270
|
+
}
|
|
271
|
+
visited.add(stepId);
|
|
272
|
+
const dependencies = dependencyGraph[stepId] || [];
|
|
273
|
+
if (dependencies.length === 0) {
|
|
274
|
+
depths.set(stepId, 1);
|
|
275
|
+
return 1;
|
|
276
|
+
}
|
|
277
|
+
// Depth is 1 + max depth of dependencies
|
|
278
|
+
const maxDependencyDepth = Math.max(...dependencies.map(dep => getDepth(dep, new Set(visited))));
|
|
279
|
+
const depth = maxDependencyDepth + 1;
|
|
280
|
+
depths.set(stepId, depth);
|
|
281
|
+
return depth;
|
|
282
|
+
}
|
|
283
|
+
// Calculate depth for all steps
|
|
284
|
+
for (const step of steps) {
|
|
285
|
+
getDepth(step.id);
|
|
286
|
+
}
|
|
287
|
+
// Return maximum depth
|
|
288
|
+
return depths.size > 0 ? Math.max(...Array.from(depths.values())) : 1;
|
|
289
|
+
}
|
|
290
|
+
/**
|
|
291
|
+
* Extract variable references from an object
|
|
292
|
+
*/
|
|
293
|
+
static extractVariableReferences(obj, prefix) {
|
|
294
|
+
const references = new Set();
|
|
295
|
+
// Regex to match ${prefix.variable} or ${{prefix.variable}}
|
|
296
|
+
const regex = new RegExp(`\\$\\{?\\{?\\s*${prefix}\\.([a-zA-Z0-9_]+)`, 'g');
|
|
297
|
+
function search(value) {
|
|
298
|
+
if (typeof value === 'string') {
|
|
299
|
+
let match;
|
|
300
|
+
while ((match = regex.exec(value)) !== null) {
|
|
301
|
+
references.add(match[1]);
|
|
302
|
+
}
|
|
303
|
+
}
|
|
304
|
+
else if (typeof value === 'object' && value !== null) {
|
|
305
|
+
for (const v of Object.values(value)) {
|
|
306
|
+
search(v);
|
|
307
|
+
}
|
|
308
|
+
}
|
|
309
|
+
}
|
|
310
|
+
search(obj);
|
|
311
|
+
return Array.from(references).sort();
|
|
312
|
+
}
|
|
313
|
+
/**
|
|
314
|
+
* Filter out internal fields (those starting with _)
|
|
315
|
+
*/
|
|
316
|
+
static filterInternalFields(obj) {
|
|
317
|
+
if (!obj)
|
|
318
|
+
return undefined;
|
|
319
|
+
const filtered = {};
|
|
320
|
+
for (const [key, value] of Object.entries(obj)) {
|
|
321
|
+
if (!key.startsWith('_')) {
|
|
322
|
+
filtered[key] = value;
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
return Object.keys(filtered).length > 0 ? filtered : undefined;
|
|
326
|
+
}
|
|
327
|
+
/**
 * Analyze data flow throughout the workflow
 * HIGH PRIORITY: Runtime Data Prediction
 *
 * Predicts what data flows through each step by analyzing:
 * - Workflow inputs referenced
 * - Step outputs consumed by other steps
 * - Context and environment variables
 * - Secrets usage
 *
 * @param workflow - Parsed workflow
 * @param dependencyGraph - stepId -> list of dependency ids
 * @returns Array of per-step { step, stepName, inputs, outputs, hasUnresolvedDependencies }
 */
static analyzeDataFlow(workflow, dependencyGraph) {
    const dataFlow = [];
    const stepOutputs = new Map(); // step -> output keys
    // First pass: collect all step outputs
    // NOTE(review): stepOutputs is populated but never read below — dead state?
    for (const step of workflow.steps) {
        if (step.outputs) {
            stepOutputs.set(step.id, new Set(Object.keys(step.outputs)));
        }
    }
    // Second pass: analyze data flow for each step
    for (const step of workflow.steps) {
        const inputs = [];
        const outputs = [];
        // Analyze inputs from step.input (with field)
        if (step.input) {
            // Workflow-input references (${inputs.*}).
            const workflowInputRefs = this.extractVariableReferences(step.input, 'inputs');
            for (const key of workflowInputRefs) {
                inputs.push({
                    source: 'workflow.inputs',
                    key,
                    type: this.inferType(workflow.inputs?.[key]),
                    example: workflow.inputs?.[key]?.default,
                });
            }
            // Step-output references (${steps.*}).
            const stepOutputRefs = this.extractVariableReferences(step.input, 'steps');
            for (const ref of stepOutputRefs) {
                // ref format: "stepId.outputKey"
                // NOTE(review): extractVariableReferences currently captures only a
                // single identifier segment, so `ref` contains no dot and this
                // branch never fires — confirm the extractor captures dotted paths.
                const [sourceStepId, outputKey] = ref.split('.');
                if (sourceStepId && outputKey) {
                    inputs.push({
                        source: 'step.output',
                        key: outputKey,
                        sourceStep: sourceStepId,
                        type: 'unknown',
                    });
                }
            }
            // Context references (${context.*}).
            const contextRefs = this.extractVariableReferences(step.input, 'context');
            for (const key of contextRefs) {
                inputs.push({
                    source: 'context',
                    key,
                    type: this.inferType(workflow.context?.[key]),
                });
            }
            // Static values: any top-level input entry with no '${' interpolation.
            for (const [key, value] of Object.entries(step.input)) {
                if (typeof value !== 'string' || !value.includes('${')) {
                    inputs.push({
                        source: 'static',
                        key,
                        type: typeof value,
                        example: value,
                    });
                }
            }
        }
        // Analyze environment variables
        if (step.env) {
            const secretRefs = this.extractVariableReferences(step.env, 'secrets');
            for (const key of secretRefs) {
                inputs.push({
                    source: 'secrets',
                    key,
                    type: 'string', // Secrets are always strings
                });
            }
            const envContextRefs = this.extractVariableReferences(step.env, 'context');
            for (const key of envContextRefs) {
                inputs.push({
                    source: 'context',
                    key,
                    type: this.inferType(workflow.context?.[key]),
                });
            }
        }
        // Analyze outputs: for each declared output, find which other steps
        // consume it (substring match on their serialized input/env).
        if (step.outputs) {
            for (const [key, value] of Object.entries(step.outputs)) {
                // Find which steps use this output
                const usedBy = [];
                for (const otherStep of workflow.steps) {
                    if (otherStep.id === step.id)
                        continue;
                    // Check if this output is referenced in other step's inputs
                    const inputStr = JSON.stringify(otherStep.input || {});
                    if (inputStr.includes(`steps.${step.id}.${key}`) ||
                        inputStr.includes(`steps.${step.id}[${key}]`)) {
                        usedBy.push(otherStep.id);
                    }
                    // Check in env
                    // NOTE(review): a step referencing the output in BOTH input and
                    // env is pushed twice — usedBy may contain duplicate ids.
                    const envStr = JSON.stringify(otherStep.env || {});
                    if (envStr.includes(`steps.${step.id}.${key}`)) {
                        usedBy.push(otherStep.id);
                    }
                }
                outputs.push({
                    key,
                    type: typeof value === 'string' ? 'string' : this.inferType(value),
                    usedBy,
                });
            }
        }
        // Check for unresolved dependencies (needs entries naming no known step).
        const deps = dependencyGraph[step.id] || [];
        const hasUnresolvedDependencies = deps.some(depId => !workflow.steps.find(s => s.id === depId));
        dataFlow.push({
            step: step.id,
            stepName: step.name,
            inputs,
            outputs,
            hasUnresolvedDependencies,
        });
    }
    return dataFlow;
}
|
|
456
|
+
/**
|
|
457
|
+
* Analyze all possible conditional execution paths
|
|
458
|
+
* HIGH PRIORITY: Conditional Path Analysis
|
|
459
|
+
*
|
|
460
|
+
* Analyzes workflow to determine:
|
|
461
|
+
* - All possible execution paths based on conditions
|
|
462
|
+
* - Steps that may be skipped
|
|
463
|
+
* - Unreachable code
|
|
464
|
+
*/
|
|
465
|
+
static analyzeConditionalPaths(workflow, dependencyGraph) {
|
|
466
|
+
const conditionalSteps = workflow.steps
|
|
467
|
+
.filter(step => step.when)
|
|
468
|
+
.map(step => ({
|
|
469
|
+
step: step.id,
|
|
470
|
+
condition: step.when,
|
|
471
|
+
canBeSkipped: true,
|
|
472
|
+
}));
|
|
473
|
+
// Simple path analysis (can be enhanced with boolean logic solver)
|
|
474
|
+
const paths = [];
|
|
475
|
+
if (conditionalSteps.length === 0) {
|
|
476
|
+
// No conditions - single path where all steps execute
|
|
477
|
+
paths.push({
|
|
478
|
+
description: 'Default path (no conditions)',
|
|
479
|
+
conditions: [],
|
|
480
|
+
stepsExecuted: workflow.steps.map(s => s.id),
|
|
481
|
+
stepsSkipped: [],
|
|
482
|
+
likelihood: 'always',
|
|
483
|
+
});
|
|
484
|
+
}
|
|
485
|
+
else {
|
|
486
|
+
// Path 1: All conditions true
|
|
487
|
+
paths.push({
|
|
488
|
+
description: 'All conditional steps execute',
|
|
489
|
+
conditions: conditionalSteps.map(c => `${c.step}: ${c.condition}`),
|
|
490
|
+
stepsExecuted: workflow.steps.map(s => s.id),
|
|
491
|
+
stepsSkipped: [],
|
|
492
|
+
likelihood: 'possible',
|
|
493
|
+
});
|
|
494
|
+
// Path 2: All conditions false
|
|
495
|
+
const conditionalStepIds = new Set(conditionalSteps.map(c => c.step));
|
|
496
|
+
paths.push({
|
|
497
|
+
description: 'All conditional steps skipped',
|
|
498
|
+
conditions: conditionalSteps.map(c => `${c.step}: NOT (${c.condition})`),
|
|
499
|
+
stepsExecuted: workflow.steps.filter(s => !conditionalStepIds.has(s.id)).map(s => s.id),
|
|
500
|
+
stepsSkipped: Array.from(conditionalStepIds),
|
|
501
|
+
likelihood: 'possible',
|
|
502
|
+
});
|
|
503
|
+
// For simplicity, we'll just show these two extremes
|
|
504
|
+
// A more advanced implementation would generate all 2^n combinations
|
|
505
|
+
}
|
|
506
|
+
// Find unreachable steps (steps that depend on other steps that are always skipped)
|
|
507
|
+
const unreachableSteps = [];
|
|
508
|
+
// Find steps that always execute (no conditions and no dependencies on conditional steps)
|
|
509
|
+
const alwaysExecutes = workflow.steps
|
|
510
|
+
.filter(step => {
|
|
511
|
+
// No condition on this step
|
|
512
|
+
if (step.when)
|
|
513
|
+
return false;
|
|
514
|
+
// No dependencies on conditional steps
|
|
515
|
+
const deps = dependencyGraph[step.id] || [];
|
|
516
|
+
return !deps.some(depId => conditionalSteps.some(cs => cs.step === depId));
|
|
517
|
+
})
|
|
518
|
+
.map(s => s.id);
|
|
519
|
+
return {
|
|
520
|
+
totalPaths: paths.length,
|
|
521
|
+
paths,
|
|
522
|
+
conditionalSteps,
|
|
523
|
+
unreachableSteps,
|
|
524
|
+
alwaysExecutes,
|
|
525
|
+
};
|
|
526
|
+
}
|
|
527
|
+
/**
|
|
528
|
+
* Estimate execution time for the workflow
|
|
529
|
+
* HIGH PRIORITY: Execution Time Estimation
|
|
530
|
+
*
|
|
531
|
+
* Predicts workflow execution duration by:
|
|
532
|
+
* - Analyzing dependency chains
|
|
533
|
+
* - Finding critical path
|
|
534
|
+
* - Estimating per-step execution time
|
|
535
|
+
* - Identifying bottlenecks
|
|
536
|
+
*/
|
|
537
|
+
static estimateExecutionTime(workflow, dependencyGraph) {
|
|
538
|
+
// Default time estimates per adapter type (in milliseconds)
|
|
539
|
+
const adapterTimeEstimates = {
|
|
540
|
+
shell: { min: 100, avg: 1000, max: 5000 },
|
|
541
|
+
http: { min: 50, avg: 500, max: 3000 },
|
|
542
|
+
fs: { min: 10, avg: 100, max: 1000 },
|
|
543
|
+
cli: { min: 100, avg: 1000, max: 5000 },
|
|
544
|
+
mediaproc: { min: 1000, avg: 10000, max: 60000 },
|
|
545
|
+
default: { min: 100, avg: 1000, max: 5000 },
|
|
546
|
+
};
|
|
547
|
+
// Calculate time for each step
|
|
548
|
+
const byStep = workflow.steps.map(step => {
|
|
549
|
+
const adapter = step.adapter || step.action.split('.')[0];
|
|
550
|
+
const estimate = adapterTimeEstimates[adapter] || adapterTimeEstimates.default;
|
|
551
|
+
// Adjust for timeout if specified
|
|
552
|
+
let timeEstimate = { ...estimate };
|
|
553
|
+
if (step.timeout) {
|
|
554
|
+
const timeout = typeof step.timeout === 'string'
|
|
555
|
+
? parseInt(step.timeout)
|
|
556
|
+
: step.timeout;
|
|
557
|
+
if (!isNaN(timeout)) {
|
|
558
|
+
timeEstimate.max = Math.min(timeEstimate.max, timeout);
|
|
559
|
+
}
|
|
560
|
+
}
|
|
561
|
+
return {
|
|
562
|
+
step: step.id,
|
|
563
|
+
...timeEstimate,
|
|
564
|
+
onCriticalPath: false, // Will be updated
|
|
565
|
+
};
|
|
566
|
+
});
|
|
567
|
+
// Group steps by execution phase (based on dependency depth)
|
|
568
|
+
const stepDepths = new Map();
|
|
569
|
+
const calculateDepth = (stepId, visited = new Set()) => {
|
|
570
|
+
if (stepDepths.has(stepId))
|
|
571
|
+
return stepDepths.get(stepId);
|
|
572
|
+
if (visited.has(stepId))
|
|
573
|
+
return 0;
|
|
574
|
+
visited.add(stepId);
|
|
575
|
+
const deps = dependencyGraph[stepId] || [];
|
|
576
|
+
const depth = deps.length === 0
|
|
577
|
+
? 0
|
|
578
|
+
: Math.max(...deps.map(d => calculateDepth(d, new Set(visited)))) + 1;
|
|
579
|
+
stepDepths.set(stepId, depth);
|
|
580
|
+
return depth;
|
|
581
|
+
};
|
|
582
|
+
workflow.steps.forEach(step => calculateDepth(step.id));
|
|
583
|
+
const maxPhase = Math.max(...Array.from(stepDepths.values()));
|
|
584
|
+
// Build by-phase estimates
|
|
585
|
+
const byPhase = [];
|
|
586
|
+
for (let phase = 0; phase <= maxPhase; phase++) {
|
|
587
|
+
const stepsInPhase = workflow.steps
|
|
588
|
+
.filter(step => stepDepths.get(step.id) === phase)
|
|
589
|
+
.map(s => s.id);
|
|
590
|
+
if (stepsInPhase.length === 0)
|
|
591
|
+
continue;
|
|
592
|
+
// In parallel execution, phase time = max of any step in that phase
|
|
593
|
+
// In sequential, phase time = sum of all steps
|
|
594
|
+
const stepTimes = stepsInPhase.map(stepId => byStep.find(s => s.step === stepId));
|
|
595
|
+
byPhase.push({
|
|
596
|
+
phase,
|
|
597
|
+
steps: stepsInPhase,
|
|
598
|
+
duration: {
|
|
599
|
+
min: Math.max(...stepTimes.map(s => s.min)),
|
|
600
|
+
avg: Math.max(...stepTimes.map(s => s.avg)),
|
|
601
|
+
max: Math.max(...stepTimes.map(s => s.max)),
|
|
602
|
+
},
|
|
603
|
+
});
|
|
604
|
+
}
|
|
605
|
+
// Find critical path (longest dependency chain)
|
|
606
|
+
const findCriticalPath = () => {
|
|
607
|
+
const pathDurations = new Map();
|
|
608
|
+
const calculatePath = (stepId, visited = new Set()) => {
|
|
609
|
+
if (pathDurations.has(stepId))
|
|
610
|
+
return pathDurations.get(stepId);
|
|
611
|
+
if (visited.has(stepId))
|
|
612
|
+
return { duration: 0, path: [] };
|
|
613
|
+
visited.add(stepId);
|
|
614
|
+
const deps = dependencyGraph[stepId] || [];
|
|
615
|
+
const stepTime = byStep.find(s => s.step === stepId).avg;
|
|
616
|
+
if (deps.length === 0) {
|
|
617
|
+
return { duration: stepTime, path: [stepId] };
|
|
618
|
+
}
|
|
619
|
+
const depPaths = deps.map(d => calculatePath(d, new Set(visited)));
|
|
620
|
+
const longestDep = depPaths.reduce((max, curr) => curr.duration > max.duration ? curr : max);
|
|
621
|
+
const result = {
|
|
622
|
+
duration: longestDep.duration + stepTime,
|
|
623
|
+
path: [...longestDep.path, stepId],
|
|
624
|
+
};
|
|
625
|
+
pathDurations.set(stepId, result);
|
|
626
|
+
return result;
|
|
627
|
+
};
|
|
628
|
+
// Find the longest path among all end nodes (steps with no dependents)
|
|
629
|
+
const endNodes = workflow.steps.filter(step => {
|
|
630
|
+
return !workflow.steps.some(other => (other.needs || []).includes(step.id));
|
|
631
|
+
});
|
|
632
|
+
const allPaths = endNodes.map(step => calculatePath(step.id));
|
|
633
|
+
const longestPath = allPaths.reduce((max, curr) => curr.duration > max.duration ? curr : max, { duration: 0, path: [] });
|
|
634
|
+
// Mark critical path steps
|
|
635
|
+
longestPath.path.forEach((stepId) => {
|
|
636
|
+
const s = byStep.find(s => s.step === stepId);
|
|
637
|
+
if (s)
|
|
638
|
+
s.onCriticalPath = true;
|
|
639
|
+
});
|
|
640
|
+
return { steps: longestPath.path, duration: longestPath.duration };
|
|
641
|
+
};
|
|
642
|
+
const criticalPath = findCriticalPath();
|
|
643
|
+
// Calculate total time
|
|
644
|
+
const total = {
|
|
645
|
+
min: byPhase.reduce((sum, p) => sum + p.duration.min, 0),
|
|
646
|
+
avg: byPhase.reduce((sum, p) => sum + p.duration.avg, 0),
|
|
647
|
+
max: byPhase.reduce((sum, p) => sum + p.duration.max, 0),
|
|
648
|
+
};
|
|
649
|
+
// Identify bottlenecks (steps on critical path with long duration)
|
|
650
|
+
const avgStepTime = total.avg / workflow.steps.length;
|
|
651
|
+
const bottlenecks = byStep
|
|
652
|
+
.filter(s => s.onCriticalPath && s.avg > avgStepTime * 1.5)
|
|
653
|
+
.map(s => ({
|
|
654
|
+
step: s.step,
|
|
655
|
+
reason: `On critical path with ${s.avg}ms avg execution time`,
|
|
656
|
+
impact: Math.round(s.avg * 0.5), // Assume 50% optimization potential
|
|
657
|
+
}));
|
|
658
|
+
return {
|
|
659
|
+
total,
|
|
660
|
+
byPhase,
|
|
661
|
+
criticalPath,
|
|
662
|
+
bottlenecks,
|
|
663
|
+
byStep,
|
|
664
|
+
};
|
|
665
|
+
}
|
|
666
|
+
/**
|
|
667
|
+
* Infer type from workflow value definition
|
|
668
|
+
*/
|
|
669
|
+
static inferType(value) {
|
|
670
|
+
if (!value)
|
|
671
|
+
return 'unknown';
|
|
672
|
+
if (typeof value !== 'object')
|
|
673
|
+
return typeof value;
|
|
674
|
+
if (value.type)
|
|
675
|
+
return value.type;
|
|
676
|
+
if (value.default !== undefined)
|
|
677
|
+
return typeof value.default;
|
|
678
|
+
return 'unknown';
|
|
679
|
+
}
|
|
680
|
+
/**
|
|
681
|
+
* Generate human-readable sentences from structured logs
|
|
682
|
+
*
|
|
683
|
+
* This method converts the JSON-formatted explanation logs into
|
|
684
|
+
* natural language sentences that describe the workflow execution plan.
|
|
685
|
+
*
|
|
686
|
+
* @param explanation - The execution explanation to convert
|
|
687
|
+
* @returns Array of human-readable sentences
|
|
688
|
+
*/
|
|
689
|
+
static generateSentencesFromLogs(explanation) {
|
|
690
|
+
const sentences = [];
|
|
691
|
+
// Workflow overview sentence
|
|
692
|
+
const workflowType = this.inferWorkflowType(explanation);
|
|
693
|
+
sentences.push(`This workflow "${explanation.workflowName}" is designed to ${workflowType}.`);
|
|
694
|
+
// Execution strategy sentence
|
|
695
|
+
if (explanation.executionStrategy === 'sequential') {
|
|
696
|
+
sentences.push(`It will execute ${explanation.stepCount} steps sequentially, one after another.`);
|
|
697
|
+
}
|
|
698
|
+
else if (explanation.executionStrategy === 'parallel') {
|
|
699
|
+
sentences.push(`It will execute all ${explanation.stepCount} steps in parallel for maximum efficiency.`);
|
|
700
|
+
}
|
|
701
|
+
else {
|
|
702
|
+
sentences.push(`It will execute ${explanation.stepCount} steps using a mixed strategy, ` +
|
|
703
|
+
`with ${explanation.complexity?.parallelizableSteps || 0} steps running in parallel ` +
|
|
704
|
+
`and ${explanation.complexity?.sequentialSteps || 0} steps running sequentially.`);
|
|
705
|
+
}
|
|
706
|
+
// Adapters sentence
|
|
707
|
+
if (explanation.adaptersUsed.length > 0) {
|
|
708
|
+
const adapterList = explanation.adaptersUsed.join(', ');
|
|
709
|
+
sentences.push(`The workflow uses ${explanation.adaptersUsed.length} adapter(s): ${adapterList}.`);
|
|
710
|
+
}
|
|
711
|
+
// Inputs sentence
|
|
712
|
+
if (explanation.requiredInputs && explanation.requiredInputs.length > 0) {
|
|
713
|
+
sentences.push(`Required inputs: ${explanation.requiredInputs.join(', ')}.`);
|
|
714
|
+
}
|
|
715
|
+
// Time estimation sentence
|
|
716
|
+
if (explanation.timeEstimation) {
|
|
717
|
+
const { total } = explanation.timeEstimation;
|
|
718
|
+
sentences.push(`Estimated execution time: ${total.min}-${total.max}ms (average: ${total.avg}ms).`);
|
|
719
|
+
if (explanation.timeEstimation.bottlenecks.length > 0) {
|
|
720
|
+
const bottleneck = explanation.timeEstimation.bottlenecks[0];
|
|
721
|
+
sentences.push(`Potential bottleneck identified in step "${bottleneck.step}": ${bottleneck.reason}.`);
|
|
722
|
+
}
|
|
723
|
+
}
|
|
724
|
+
// Critical path sentence
|
|
725
|
+
if (explanation.timeEstimation?.criticalPath) {
|
|
726
|
+
const { steps } = explanation.timeEstimation.criticalPath;
|
|
727
|
+
sentences.push(`Critical path (longest dependency chain): ${steps.join(' → ')}.`);
|
|
728
|
+
}
|
|
729
|
+
// Data flow sentence
|
|
730
|
+
if (explanation.dataFlow && explanation.dataFlow.length > 0) {
|
|
731
|
+
const stepsWithInputs = explanation.dataFlow.filter(df => df.inputs.some(i => i.source === 'workflow.inputs')).length;
|
|
732
|
+
const stepsWithOutputs = explanation.dataFlow.filter(df => df.outputs.length > 0).length;
|
|
733
|
+
if (stepsWithInputs > 0 || stepsWithOutputs > 0) {
|
|
734
|
+
sentences.push(`Data flow: ${stepsWithInputs} step(s) consume workflow inputs, ` +
|
|
735
|
+
`${stepsWithOutputs} step(s) produce outputs.`);
|
|
736
|
+
}
|
|
737
|
+
}
|
|
738
|
+
// Conditional paths sentence
|
|
739
|
+
if (explanation.conditionalPaths && explanation.conditionalPaths.conditionalSteps.length > 0) {
|
|
740
|
+
sentences.push(`The workflow has ${explanation.conditionalPaths.conditionalSteps.length} conditional step(s) ` +
|
|
741
|
+
`that may be skipped based on runtime conditions.`);
|
|
742
|
+
}
|
|
743
|
+
// Cycles warning sentence
|
|
744
|
+
if (explanation.hasCycles) {
|
|
745
|
+
sentences.push(`⚠️ WARNING: Circular dependencies detected! This workflow cannot be executed safely.`);
|
|
746
|
+
}
|
|
747
|
+
// Step-by-step execution plan
|
|
748
|
+
sentences.push('\nExecution plan:');
|
|
749
|
+
explanation.steps.forEach((step, index) => {
|
|
750
|
+
const stepNum = index + 1;
|
|
751
|
+
let stepSentence = ` ${stepNum}. "${step.name || step.id}" will execute "${step.uses}"`;
|
|
752
|
+
if (step.needs.length > 0) {
|
|
753
|
+
stepSentence += ` after completing: ${step.needs.join(', ')}`;
|
|
754
|
+
}
|
|
755
|
+
if (step.when) {
|
|
756
|
+
stepSentence += ` (conditional: ${step.when})`;
|
|
757
|
+
}
|
|
758
|
+
stepSentence += '.';
|
|
759
|
+
sentences.push(stepSentence);
|
|
760
|
+
// Add input/output info
|
|
761
|
+
if (step.inputsReferenced && step.inputsReferenced.length > 0) {
|
|
762
|
+
sentences.push(` → Uses inputs: ${step.inputsReferenced.join(', ')}`);
|
|
763
|
+
}
|
|
764
|
+
if (step.outputs && Object.keys(step.outputs).length > 0) {
|
|
765
|
+
sentences.push(` → Produces outputs: ${Object.keys(step.outputs).join(', ')}`);
|
|
766
|
+
}
|
|
767
|
+
});
|
|
768
|
+
// Expected outputs sentence
|
|
769
|
+
if (explanation.outputs && Object.keys(explanation.outputs).length > 0) {
|
|
770
|
+
const outputKeys = Object.keys(explanation.outputs);
|
|
771
|
+
sentences.push(`\nExpected workflow outputs: ${outputKeys.join(', ')}.`);
|
|
772
|
+
}
|
|
773
|
+
return sentences;
|
|
774
|
+
}
|
|
775
|
+
/**
|
|
776
|
+
* Infer workflow type from explanation
|
|
777
|
+
*/
|
|
778
|
+
static inferWorkflowType(explanation) {
|
|
779
|
+
const adapters = explanation.adaptersUsed;
|
|
780
|
+
const name = explanation.workflowName?.toLowerCase() || '';
|
|
781
|
+
const description = explanation.description?.toLowerCase() || '';
|
|
782
|
+
// Analyze adapters and names/descriptions for type inference
|
|
783
|
+
if (adapters.includes('http') || adapters.includes('api')) {
|
|
784
|
+
return 'make API calls and process HTTP responses';
|
|
785
|
+
}
|
|
786
|
+
if (adapters.includes('db') || adapters.includes('database')) {
|
|
787
|
+
return 'interact with databases and manage data';
|
|
788
|
+
}
|
|
789
|
+
if (adapters.includes('shell') || adapters.includes('cli')) {
|
|
790
|
+
return 'execute shell commands and CLI operations';
|
|
791
|
+
}
|
|
792
|
+
if (adapters.includes('fs') || adapters.includes('file')) {
|
|
793
|
+
return 'perform file system operations';
|
|
794
|
+
}
|
|
795
|
+
if (name.includes('deploy') || description.includes('deploy')) {
|
|
796
|
+
return 'deploy applications and services';
|
|
797
|
+
}
|
|
798
|
+
if (name.includes('test') || description.includes('test')) {
|
|
799
|
+
return 'run tests and validate functionality';
|
|
800
|
+
}
|
|
801
|
+
if (name.includes('build') || description.includes('build')) {
|
|
802
|
+
return 'build and compile applications';
|
|
803
|
+
}
|
|
804
|
+
if (name.includes('backup') || description.includes('backup')) {
|
|
805
|
+
return 'backup data and resources';
|
|
806
|
+
}
|
|
807
|
+
if (name.includes('monitor') || description.includes('monitor')) {
|
|
808
|
+
return 'monitor systems and collect metrics';
|
|
809
|
+
}
|
|
810
|
+
// Default fallback
|
|
811
|
+
return 'automate workflow tasks';
|
|
812
|
+
}
|
|
813
|
+
}
|
|
814
|
+
//# sourceMappingURL=ExplanationGenerator.js.map
|