agentic-qe 2.5.6 → 2.5.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/n8n/n8n-base-agent.md +376 -0
- package/.claude/agents/n8n/n8n-bdd-scenario-tester.md +613 -0
- package/.claude/agents/n8n/n8n-chaos-tester.md +654 -0
- package/.claude/agents/n8n/n8n-ci-orchestrator.md +850 -0
- package/.claude/agents/n8n/n8n-compliance-validator.md +685 -0
- package/.claude/agents/n8n/n8n-expression-validator.md +560 -0
- package/.claude/agents/n8n/n8n-integration-test.md +602 -0
- package/.claude/agents/n8n/n8n-monitoring-validator.md +589 -0
- package/.claude/agents/n8n/n8n-node-validator.md +455 -0
- package/.claude/agents/n8n/n8n-performance-tester.md +630 -0
- package/.claude/agents/n8n/n8n-security-auditor.md +786 -0
- package/.claude/agents/n8n/n8n-trigger-test.md +500 -0
- package/.claude/agents/n8n/n8n-unit-tester.md +633 -0
- package/.claude/agents/n8n/n8n-version-comparator.md +567 -0
- package/.claude/agents/n8n/n8n-workflow-executor.md +392 -0
- package/.claude/skills/n8n-expression-testing/SKILL.md +434 -0
- package/.claude/skills/n8n-integration-testing-patterns/SKILL.md +540 -0
- package/.claude/skills/n8n-security-testing/SKILL.md +599 -0
- package/.claude/skills/n8n-trigger-testing-strategies/SKILL.md +541 -0
- package/.claude/skills/n8n-workflow-testing-fundamentals/SKILL.md +447 -0
- package/CHANGELOG.md +41 -0
- package/README.md +7 -4
- package/dist/agents/n8n/N8nAPIClient.d.ts +121 -0
- package/dist/agents/n8n/N8nAPIClient.d.ts.map +1 -0
- package/dist/agents/n8n/N8nAPIClient.js +367 -0
- package/dist/agents/n8n/N8nAPIClient.js.map +1 -0
- package/dist/agents/n8n/N8nAuditPersistence.d.ts +120 -0
- package/dist/agents/n8n/N8nAuditPersistence.d.ts.map +1 -0
- package/dist/agents/n8n/N8nAuditPersistence.js +473 -0
- package/dist/agents/n8n/N8nAuditPersistence.js.map +1 -0
- package/dist/agents/n8n/N8nBDDScenarioTesterAgent.d.ts +159 -0
- package/dist/agents/n8n/N8nBDDScenarioTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nBDDScenarioTesterAgent.js +697 -0
- package/dist/agents/n8n/N8nBDDScenarioTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nBaseAgent.d.ts +126 -0
- package/dist/agents/n8n/N8nBaseAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nBaseAgent.js +446 -0
- package/dist/agents/n8n/N8nBaseAgent.js.map +1 -0
- package/dist/agents/n8n/N8nCIOrchestratorAgent.d.ts +164 -0
- package/dist/agents/n8n/N8nCIOrchestratorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nCIOrchestratorAgent.js +610 -0
- package/dist/agents/n8n/N8nCIOrchestratorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nChaosTesterAgent.d.ts +205 -0
- package/dist/agents/n8n/N8nChaosTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nChaosTesterAgent.js +729 -0
- package/dist/agents/n8n/N8nChaosTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nComplianceValidatorAgent.d.ts +228 -0
- package/dist/agents/n8n/N8nComplianceValidatorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nComplianceValidatorAgent.js +986 -0
- package/dist/agents/n8n/N8nComplianceValidatorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nContractTesterAgent.d.ts +213 -0
- package/dist/agents/n8n/N8nContractTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nContractTesterAgent.js +989 -0
- package/dist/agents/n8n/N8nContractTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nExpressionValidatorAgent.d.ts +99 -0
- package/dist/agents/n8n/N8nExpressionValidatorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nExpressionValidatorAgent.js +632 -0
- package/dist/agents/n8n/N8nExpressionValidatorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nFailureModeTesterAgent.d.ts +238 -0
- package/dist/agents/n8n/N8nFailureModeTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nFailureModeTesterAgent.js +956 -0
- package/dist/agents/n8n/N8nFailureModeTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nIdempotencyTesterAgent.d.ts +242 -0
- package/dist/agents/n8n/N8nIdempotencyTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nIdempotencyTesterAgent.js +992 -0
- package/dist/agents/n8n/N8nIdempotencyTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nIntegrationTestAgent.d.ts +104 -0
- package/dist/agents/n8n/N8nIntegrationTestAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nIntegrationTestAgent.js +653 -0
- package/dist/agents/n8n/N8nIntegrationTestAgent.js.map +1 -0
- package/dist/agents/n8n/N8nMonitoringValidatorAgent.d.ts +210 -0
- package/dist/agents/n8n/N8nMonitoringValidatorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nMonitoringValidatorAgent.js +669 -0
- package/dist/agents/n8n/N8nMonitoringValidatorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nNodeValidatorAgent.d.ts +142 -0
- package/dist/agents/n8n/N8nNodeValidatorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nNodeValidatorAgent.js +1090 -0
- package/dist/agents/n8n/N8nNodeValidatorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nPerformanceTesterAgent.d.ts +198 -0
- package/dist/agents/n8n/N8nPerformanceTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nPerformanceTesterAgent.js +653 -0
- package/dist/agents/n8n/N8nPerformanceTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nReplayabilityTesterAgent.d.ts +245 -0
- package/dist/agents/n8n/N8nReplayabilityTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nReplayabilityTesterAgent.js +952 -0
- package/dist/agents/n8n/N8nReplayabilityTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nSecretsHygieneAuditorAgent.d.ts +325 -0
- package/dist/agents/n8n/N8nSecretsHygieneAuditorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nSecretsHygieneAuditorAgent.js +1187 -0
- package/dist/agents/n8n/N8nSecretsHygieneAuditorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nSecurityAuditorAgent.d.ts +91 -0
- package/dist/agents/n8n/N8nSecurityAuditorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nSecurityAuditorAgent.js +825 -0
- package/dist/agents/n8n/N8nSecurityAuditorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nTestHarness.d.ts +131 -0
- package/dist/agents/n8n/N8nTestHarness.d.ts.map +1 -0
- package/dist/agents/n8n/N8nTestHarness.js +456 -0
- package/dist/agents/n8n/N8nTestHarness.js.map +1 -0
- package/dist/agents/n8n/N8nTriggerTestAgent.d.ts +119 -0
- package/dist/agents/n8n/N8nTriggerTestAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nTriggerTestAgent.js +652 -0
- package/dist/agents/n8n/N8nTriggerTestAgent.js.map +1 -0
- package/dist/agents/n8n/N8nUnitTesterAgent.d.ts +130 -0
- package/dist/agents/n8n/N8nUnitTesterAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nUnitTesterAgent.js +522 -0
- package/dist/agents/n8n/N8nUnitTesterAgent.js.map +1 -0
- package/dist/agents/n8n/N8nVersionComparatorAgent.d.ts +201 -0
- package/dist/agents/n8n/N8nVersionComparatorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nVersionComparatorAgent.js +645 -0
- package/dist/agents/n8n/N8nVersionComparatorAgent.js.map +1 -0
- package/dist/agents/n8n/N8nWorkflowExecutorAgent.d.ts +120 -0
- package/dist/agents/n8n/N8nWorkflowExecutorAgent.d.ts.map +1 -0
- package/dist/agents/n8n/N8nWorkflowExecutorAgent.js +347 -0
- package/dist/agents/n8n/N8nWorkflowExecutorAgent.js.map +1 -0
- package/dist/agents/n8n/index.d.ts +119 -0
- package/dist/agents/n8n/index.d.ts.map +1 -0
- package/dist/agents/n8n/index.js +298 -0
- package/dist/agents/n8n/index.js.map +1 -0
- package/dist/agents/n8n/types.d.ts +486 -0
- package/dist/agents/n8n/types.d.ts.map +1 -0
- package/dist/agents/n8n/types.js +8 -0
- package/dist/agents/n8n/types.js.map +1 -0
- package/dist/cli/init/agents.d.ts.map +1 -1
- package/dist/cli/init/agents.js +29 -0
- package/dist/cli/init/agents.js.map +1 -1
- package/dist/cli/init/skills.d.ts.map +1 -1
- package/dist/cli/init/skills.js +7 -1
- package/dist/cli/init/skills.js.map +1 -1
- package/dist/core/memory/HNSWVectorMemory.js +1 -1
- package/dist/mcp/server-instructions.d.ts +1 -1
- package/dist/mcp/server-instructions.js +1 -1
- package/docs/reference/agents.md +91 -2
- package/docs/reference/skills.md +97 -2
- package/package.json +2 -2
|
@@ -0,0 +1,653 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* N8nPerformanceTesterAgent
|
|
4
|
+
*
|
|
5
|
+
* Performance testing for n8n workflows:
|
|
6
|
+
* - Execution time benchmarking
|
|
7
|
+
* - Load testing simulation
|
|
8
|
+
* - Memory usage tracking
|
|
9
|
+
* - Bottleneck identification
|
|
10
|
+
* - Performance regression detection
|
|
11
|
+
*/
|
|
12
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
13
|
+
exports.N8nPerformanceTesterAgent = void 0;
|
|
14
|
+
const N8nBaseAgent_1 = require("./N8nBaseAgent");
|
|
15
|
+
class N8nPerformanceTesterAgent extends N8nBaseAgent_1.N8nBaseAgent {
    /**
     * @param {object} config - Base agent configuration; any `config.capabilities`
     *   are appended after the built-in performance-testing capabilities.
     */
    constructor(config) {
        const capabilities = [
            {
                name: 'execution-benchmarking',
                version: '1.0.0',
                description: 'Benchmark workflow execution times',
                parameters: {},
            },
            {
                name: 'load-testing',
                version: '1.0.0',
                description: 'Simulate concurrent load on workflows',
                parameters: {},
            },
            {
                name: 'bottleneck-detection',
                version: '1.0.0',
                description: 'Identify performance bottlenecks',
                parameters: {},
            },
            {
                name: 'regression-detection',
                version: '1.0.0',
                description: 'Detect performance regressions',
                parameters: {},
            },
        ];
        super({
            ...config,
            type: 'n8n-performance-tester',
            capabilities: [...capabilities, ...(config.capabilities || [])],
        });
        // In-memory baseline store, keyed by workflowId.
        this.baselines = new Map();
    }
    /**
     * Entry point for task dispatch. Only 'performance-test' tasks are supported.
     * @throws {Error} for any other task type.
     */
    async performTask(task) {
        const perfTask = task;
        if (perfTask.type !== 'performance-test') {
            throw new Error(`Unsupported task type: ${perfTask.type}`);
        }
        return this.runPerformanceTest(perfTask.target, perfTask.options);
    }
    /**
     * Run comprehensive performance test: warmup, timed iterations,
     * metric aggregation, bottleneck analysis, optional baseline
     * comparison/recording, and recommendation generation.
     *
     * @param {string} workflowId - Workflow to benchmark.
     * @param {object} [options] - iterations (default 10), concurrency (default 1),
     *   warmupIterations (default 2), timeout (ms, per execution),
     *   compareBaseline (baseline id), recordBaseline (boolean), targetMetrics.
     * @returns {Promise<object>} Full performance test result (also persisted).
     */
    async runPerformanceTest(workflowId, options) {
        const iterations = options?.iterations || 10;
        const concurrency = options?.concurrency || 1;
        const warmupIterations = options?.warmupIterations || 2;
        const workflow = await this.getWorkflow(workflowId);
        // Warmup phase
        await this.runWarmup(workflowId, warmupIterations);
        // Main test phase
        const executionResults = await this.runIterations(workflowId, iterations, concurrency, options?.timeout);
        // Calculate metrics
        const metrics = this.calculateMetrics(executionResults);
        const nodeMetrics = this.analyzeNodePerformance(workflow, executionResults);
        const bottlenecks = this.identifyBottlenecks(workflow, nodeMetrics);
        // Baseline comparison
        let baselineComparison;
        if (options?.compareBaseline) {
            baselineComparison = this.compareWithBaseline(options.compareBaseline, metrics);
        }
        // Record new baseline if requested
        if (options?.recordBaseline) {
            this.recordBaseline(workflowId, metrics);
        }
        // Generate recommendations
        const recommendations = this.generateRecommendations(workflow, metrics, bottlenecks);
        // Determine pass/fail
        const passed = this.evaluateTargetMetrics(metrics, options?.targetMetrics);
        const result = {
            workflowId,
            testConfig: {
                iterations,
                concurrency,
                warmupIterations,
            },
            metrics,
            nodeMetrics,
            bottlenecks,
            baselineComparison,
            recommendations,
            passed,
        };
        // Store result
        await this.storeTestResult(`performance-test:${workflowId}`, result);
        // Emit event
        this.emitEvent('performance.test.completed', {
            workflowId,
            meanDuration: metrics.timing.mean,
            p95Duration: metrics.timing.p95,
            bottleneckCount: bottlenecks.length,
            passed,
        });
        return result;
    }
    /**
     * Run warmup iterations to stabilize caches/JIT before measurement.
     * Failures during warmup are intentionally swallowed.
     */
    async runWarmup(workflowId, iterations) {
        for (let i = 0; i < iterations; i++) {
            try {
                await this.executeWorkflow(workflowId, {});
            }
            catch {
                // Warmup failures are acceptable
            }
        }
    }
    /**
     * Run the measured test iterations, either sequentially (concurrency === 1)
     * or in concurrent batches of size `concurrency`.
     */
    async runIterations(workflowId, iterations, concurrency, timeout) {
        const results = [];
        if (concurrency === 1) {
            // Sequential execution
            for (let i = 0; i < iterations; i++) {
                const result = await this.runSingleIteration(workflowId, timeout);
                results.push(result);
            }
        }
        else {
            // Concurrent execution
            const batches = Math.ceil(iterations / concurrency);
            for (let batch = 0; batch < batches; batch++) {
                const batchSize = Math.min(concurrency, iterations - batch * concurrency);
                const promises = Array(batchSize)
                    .fill(null)
                    .map(() => this.runSingleIteration(workflowId, timeout));
                const batchResults = await Promise.all(promises);
                results.push(...batchResults);
            }
        }
        return results;
    }
    /**
     * Wrap a promise with an optional timeout in milliseconds.
     * Resolves/rejects with the underlying promise, or rejects with a
     * timeout Error if `timeout` elapses first. No-op when timeout is
     * undefined/0. The timer is always cleared to avoid keeping the
     * event loop alive.
     */
    async withTimeout(promise, timeout) {
        if (!timeout) {
            return promise;
        }
        let timer;
        try {
            return await Promise.race([
                promise,
                new Promise((_, reject) => {
                    timer = setTimeout(() => reject(new Error(`Execution timed out after ${timeout}ms`)), timeout);
                }),
            ]);
        }
        finally {
            clearTimeout(timer);
        }
    }
    /**
     * Run a single measured iteration.
     *
     * FIX: the `timeout` parameter was previously accepted but never
     * enforced — a hung execution would block the whole test run. It is
     * now applied via `withTimeout`; a timed-out iteration is recorded
     * as a failed result rather than hanging.
     */
    async runSingleIteration(workflowId, timeout) {
        const startTime = Date.now();
        const startMemory = process.memoryUsage().heapUsed;
        try {
            const execution = await this.withTimeout(this.executeWorkflow(workflowId, {}), timeout);
            const duration = Date.now() - startTime;
            const endMemory = process.memoryUsage().heapUsed;
            return {
                success: execution.status === 'success',
                duration,
                memoryDelta: endMemory - startMemory,
                nodeTimings: this.extractNodeTimings(execution),
                itemsProcessed: this.countItemsProcessed(execution),
            };
        }
        catch (error) {
            return {
                success: false,
                duration: Date.now() - startTime,
                memoryDelta: 0,
                nodeTimings: [],
                itemsProcessed: 0,
                error: error instanceof Error ? error.message : String(error),
            };
        }
    }
    /**
     * Extract REAL node timings from execution data.
     * Parses n8n execution runData to get actual per-node execution times.
     * Falls back to timestamp deltas, then to a data-volume estimate,
     * when `executionTime` is not present on a run.
     */
    extractNodeTimings(execution) {
        if (!execution.data?.resultData?.runData) {
            return [];
        }
        const timings = [];
        const runData = execution.data.resultData.runData;
        for (const [nodeName, nodeRuns] of Object.entries(runData)) {
            if (!Array.isArray(nodeRuns) || nodeRuns.length === 0) {
                continue;
            }
            // Aggregate timing across all runs of this node
            let totalDuration = 0;
            let runCount = 0;
            for (const run of nodeRuns) {
                // n8n stores startTime and executionTime in each run
                if (run.executionTime !== undefined) {
                    totalDuration += run.executionTime;
                    runCount++;
                }
                else if (run.startTime) {
                    // Calculate from timestamps if executionTime not available
                    const startTime = new Date(run.startTime).getTime();
                    // Use the run data timestamp or estimate based on available data
                    const outputJson = run.data?.main?.[0]?.[0]?.json;
                    const endTime = outputJson && typeof outputJson['_timestamp'] === 'string'
                        ? new Date(outputJson['_timestamp']).getTime()
                        : startTime + 100; // Estimate if no end time available
                    totalDuration += endTime - startTime;
                    runCount++;
                }
            }
            if (runCount > 0) {
                timings.push({
                    nodeName,
                    duration: totalDuration,
                    averageDuration: totalDuration / runCount,
                    runCount,
                });
            }
            else {
                // Fall back to estimating based on data volume processed
                const itemCount = this.countNodeItems(nodeRuns);
                const estimatedDuration = Math.max(1, itemCount * 0.5); // 0.5ms per item baseline
                timings.push({
                    nodeName,
                    duration: estimatedDuration,
                    averageDuration: estimatedDuration,
                    runCount: 1,
                });
            }
        }
        return timings;
    }
    /**
     * Count items produced by a node across all of its runs (sums the
     * lengths of every `main` output branch).
     */
    countNodeItems(nodeRuns) {
        let count = 0;
        for (const run of nodeRuns) {
            const runData = run;
            if (runData.data?.main) {
                for (const output of runData.data.main) {
                    count += output?.length || 0;
                }
            }
        }
        return count;
    }
    /**
     * Count total items processed across every node in an execution.
     */
    countItemsProcessed(execution) {
        if (!execution.data?.resultData?.runData)
            return 0;
        let count = 0;
        for (const nodeData of Object.values(execution.data.resultData.runData)) {
            if (Array.isArray(nodeData)) {
                for (const run of nodeData) {
                    if (run.data?.main) {
                        for (const output of run.data.main) {
                            count += output?.length || 0;
                        }
                    }
                }
            }
        }
        return count;
    }
    /**
     * Calculate aggregate performance metrics from iteration results.
     * Only successful iterations contribute to timing/throughput/memory;
     * an all-failed run returns a zeroed metrics object.
     *
     * FIX: throughput divisions are now guarded so a totalDuration of 0
     * (sub-millisecond executions) yields 0 instead of Infinity/NaN.
     */
    calculateMetrics(results) {
        const successfulResults = results.filter(r => r.success);
        const durations = successfulResults.map(r => r.duration).sort((a, b) => a - b);
        if (durations.length === 0) {
            return {
                totalIterations: results.length,
                successfulIterations: 0,
                failedIterations: results.length,
                timing: {
                    min: 0,
                    max: 0,
                    mean: 0,
                    median: 0,
                    p95: 0,
                    p99: 0,
                    stdDev: 0,
                },
                throughput: {
                    executionsPerSecond: 0,
                    itemsPerSecond: 0,
                },
            };
        }
        const mean = durations.reduce((a, b) => a + b, 0) / durations.length;
        const median = this.percentile(durations, 50);
        const p95 = this.percentile(durations, 95);
        const p99 = this.percentile(durations, 99);
        const variance = durations.reduce((sum, d) => sum + Math.pow(d - mean, 2), 0) / durations.length;
        const stdDev = Math.sqrt(variance);
        const totalDuration = durations.reduce((a, b) => a + b, 0);
        const totalItems = successfulResults.reduce((sum, r) => sum + r.itemsProcessed, 0);
        return {
            totalIterations: results.length,
            successfulIterations: successfulResults.length,
            failedIterations: results.length - successfulResults.length,
            timing: {
                min: durations[0],
                max: durations[durations.length - 1],
                mean,
                median,
                p95,
                p99,
                stdDev,
            },
            throughput: {
                // Guard against totalDuration === 0 (instant executions)
                executionsPerSecond: totalDuration > 0 ? (successfulResults.length / totalDuration) * 1000 : 0,
                itemsPerSecond: totalDuration > 0 ? (totalItems / totalDuration) * 1000 : 0,
            },
            memory: {
                peak: Math.max(...successfulResults.map(r => r.memoryDelta)),
                average: successfulResults.reduce((sum, r) => sum + r.memoryDelta, 0) / successfulResults.length,
            },
        };
    }
    /**
     * Nearest-rank percentile of a pre-sorted ascending array.
     * Returns 0 for an empty array.
     */
    percentile(arr, p) {
        if (arr.length === 0)
            return 0;
        const index = Math.ceil((p / 100) * arr.length) - 1;
        return arr[Math.max(0, Math.min(index, arr.length - 1))];
    }
    /**
     * Analyze node-level performance by aggregating real per-node timings
     * from successful executions; falls back to weighted estimation when
     * no timing data was found. Results are sorted by mean time, slowest
     * first. A node consuming >30% of mean total time is a bottleneck.
     *
     * FIX: percentage calculations guard against totalMeanTime === 0.
     */
    analyzeNodePerformance(workflow, results) {
        const nodeMetrics = [];
        const successfulResults = results.filter(r => r.success);
        if (successfulResults.length === 0) {
            return [];
        }
        // Aggregate REAL node timings across all successful executions
        const nodeTimingAggregates = new Map();
        // First pass: collect all timings from execution results
        for (const result of successfulResults) {
            for (const timing of result.nodeTimings) {
                const existing = nodeTimingAggregates.get(timing.nodeName);
                if (existing) {
                    existing.durations.push(timing.duration);
                    existing.runCounts.push(timing.runCount);
                }
                else {
                    // Find the node in workflow to get type and id
                    const node = workflow.nodes.find(n => n.name === timing.nodeName);
                    nodeTimingAggregates.set(timing.nodeName, {
                        durations: [timing.duration],
                        runCounts: [timing.runCount],
                        nodeType: node?.type || 'unknown',
                        nodeId: node?.id || timing.nodeName,
                    });
                }
            }
        }
        // Calculate total execution time for percentage calculations
        const totalMeanTime = successfulResults.reduce((sum, r) => sum + r.duration, 0) / successfulResults.length;
        // Second pass: calculate aggregate metrics for each node
        for (const [nodeName, aggregate] of nodeTimingAggregates) {
            const durations = aggregate.durations.sort((a, b) => a - b);
            const mean = durations.reduce((a, b) => a + b, 0) / durations.length;
            const percentOfTotal = totalMeanTime > 0 ? (mean / totalMeanTime) * 100 : 0;
            nodeMetrics.push({
                nodeId: aggregate.nodeId,
                nodeName,
                nodeType: aggregate.nodeType,
                executionCount: aggregate.runCounts.reduce((a, b) => a + b, 0),
                timing: {
                    min: durations[0] || 0,
                    max: durations[durations.length - 1] || 0,
                    mean,
                    percentOfTotal,
                },
                isBottleneck: percentOfTotal > 30, // Node taking >30% is a bottleneck
            });
        }
        // If no timing data was found, fall back to estimation for nodes we know exist
        if (nodeMetrics.length === 0) {
            for (const node of workflow.nodes) {
                const estimatedTime = this.estimateNodeTime(node, totalMeanTime);
                const percentOfTotal = totalMeanTime > 0 ? (estimatedTime / totalMeanTime) * 100 : 0;
                nodeMetrics.push({
                    nodeId: node.id,
                    nodeName: node.name,
                    nodeType: node.type,
                    executionCount: successfulResults.length,
                    timing: {
                        min: estimatedTime * 0.8,
                        max: estimatedTime * 1.5,
                        mean: estimatedTime,
                        percentOfTotal,
                    },
                    isBottleneck: percentOfTotal > 30,
                });
            }
        }
        return nodeMetrics.sort((a, b) => b.timing.mean - a.timing.mean);
    }
    /**
     * Estimate node execution time as a type-weighted share of the
     * total mean execution time (used only when real timings are absent).
     */
    estimateNodeTime(node, totalTime) {
        // Weight based on node type
        const weights = {
            'n8n-nodes-base.httpRequest': 0.4,
            'n8n-nodes-base.postgres': 0.3,
            'n8n-nodes-base.mysql': 0.3,
            'n8n-nodes-base.mongodb': 0.3,
            'n8n-nodes-base.code': 0.15,
            'n8n-nodes-base.function': 0.15,
            'n8n-nodes-base.if': 0.05,
            'n8n-nodes-base.set': 0.05,
        };
        const weight = weights[node.type] || 0.1;
        return totalTime * weight;
    }
    /**
     * Identify performance bottlenecks from flagged node metrics,
     * sorted by impact (percent of total time), highest first.
     */
    identifyBottlenecks(workflow, nodeMetrics) {
        const bottlenecks = [];
        for (const metric of nodeMetrics) {
            if (!metric.isBottleneck)
                continue;
            const node = workflow.nodes.find(n => n.id === metric.nodeId);
            if (!node)
                continue;
            const { severity, reason, recommendation } = this.analyzeBottleneck(node, metric);
            bottlenecks.push({
                nodeId: metric.nodeId,
                nodeName: metric.nodeName,
                nodeType: metric.nodeType,
                severity,
                impact: metric.timing.percentOfTotal,
                reason,
                recommendation,
            });
        }
        return bottlenecks.sort((a, b) => b.impact - a.impact);
    }
    /**
     * Classify a bottleneck's severity by impact (>60% critical,
     * >40% high, >30% medium, else low) and produce a node-type-specific
     * reason and recommendation.
     */
    analyzeBottleneck(node, metric) {
        const impact = metric.timing.percentOfTotal;
        let severity;
        if (impact > 60)
            severity = 'critical';
        else if (impact > 40)
            severity = 'high';
        else if (impact > 30)
            severity = 'medium';
        else
            severity = 'low';
        // Type-specific analysis
        if (node.type.includes('httpRequest')) {
            return {
                severity,
                reason: 'External HTTP calls are slow',
                recommendation: 'Consider caching responses, adding timeouts, or parallelizing requests',
            };
        }
        if (node.type.includes('postgres') || node.type.includes('mysql')) {
            return {
                severity,
                reason: 'Database operations taking significant time',
                recommendation: 'Optimize queries, add indexes, or batch operations',
            };
        }
        if (node.type.includes('code') || node.type.includes('function')) {
            return {
                severity,
                reason: 'Custom code execution is slow',
                recommendation: 'Optimize algorithms, reduce iterations, or use more efficient data structures',
            };
        }
        return {
            severity,
            reason: `Node ${node.type} is consuming ${impact.toFixed(1)}% of execution time`,
            recommendation: 'Review node configuration and consider optimization',
        };
    }
    /**
     * Compare current metrics with a stored baseline. Changes beyond
     * ±10% are recorded as regressions/improvements; regression severity
     * escalates at >25% (moderate) and >50% (severe). An unknown
     * baselineId yields an empty comparison.
     *
     * FIX: percentage-change divisions are guarded so a zero baseline
     * metric yields 0 instead of Infinity/NaN.
     */
    compareWithBaseline(baselineId, current) {
        const baseline = this.baselines.get(baselineId);
        if (!baseline) {
            return {
                baselineId,
                baselineDate: new Date(),
                regressions: [],
                improvements: [],
                overallChange: 0,
            };
        }
        const regressions = [];
        const improvements = [];
        // Compare mean duration (guard against zero baseline)
        const meanChange = baseline.metrics.meanDuration > 0
            ? ((current.timing.mean - baseline.metrics.meanDuration) / baseline.metrics.meanDuration) * 100
            : 0;
        if (meanChange > 10) {
            regressions.push({
                metric: 'mean duration',
                baseline: baseline.metrics.meanDuration,
                current: current.timing.mean,
                change: meanChange,
                severity: meanChange > 50 ? 'severe' : meanChange > 25 ? 'moderate' : 'minor',
            });
        }
        else if (meanChange < -10) {
            improvements.push({
                metric: 'mean duration',
                baseline: baseline.metrics.meanDuration,
                current: current.timing.mean,
                change: Math.abs(meanChange),
            });
        }
        // Compare p95 (guard against zero baseline)
        const p95Change = baseline.metrics.p95Duration > 0
            ? ((current.timing.p95 - baseline.metrics.p95Duration) / baseline.metrics.p95Duration) * 100
            : 0;
        if (p95Change > 10) {
            regressions.push({
                metric: 'p95 duration',
                baseline: baseline.metrics.p95Duration,
                current: current.timing.p95,
                change: p95Change,
                severity: p95Change > 50 ? 'severe' : p95Change > 25 ? 'moderate' : 'minor',
            });
        }
        else if (p95Change < -10) {
            improvements.push({
                metric: 'p95 duration',
                baseline: baseline.metrics.p95Duration,
                current: current.timing.p95,
                change: Math.abs(p95Change),
            });
        }
        return {
            baselineId,
            baselineDate: baseline.createdAt,
            regressions,
            improvements,
            overallChange: meanChange,
        };
    }
    /**
     * Record current metrics as the baseline for a workflow,
     * overwriting any previous baseline for that workflowId.
     * (Removed a dead `baselineId` local that was computed but unused.)
     */
    recordBaseline(workflowId, metrics) {
        this.baselines.set(workflowId, {
            workflowId,
            metrics: {
                meanDuration: metrics.timing.mean,
                p95Duration: metrics.timing.p95,
                maxDuration: metrics.timing.max,
                throughput: metrics.throughput.executionsPerSecond,
            },
            createdAt: new Date(),
            version: '1.0',
        });
    }
    /**
     * Generate prioritized recommendations: reliability (failure rate
     * >10%), consistency (stdDev > 50% of mean), the top-3 bottlenecks,
     * and memory (peak delta > 100MB).
     */
    generateRecommendations(workflow, metrics, bottlenecks) {
        const recommendations = [];
        // High failure rate
        if (metrics.failedIterations > metrics.totalIterations * 0.1) {
            recommendations.push({
                priority: 'high',
                category: 'Reliability',
                issue: `${((metrics.failedIterations / metrics.totalIterations) * 100).toFixed(1)}% failure rate`,
                recommendation: 'Add error handling, retries, and timeout configurations',
                expectedImprovement: 'Improved reliability and success rate',
            });
        }
        // High variance
        if (metrics.timing.stdDev > metrics.timing.mean * 0.5) {
            recommendations.push({
                priority: 'medium',
                category: 'Consistency',
                issue: 'High execution time variance',
                recommendation: 'Investigate variable external dependencies and add timeouts',
                expectedImprovement: 'More predictable execution times',
            });
        }
        // Bottleneck recommendations
        for (const bottleneck of bottlenecks.slice(0, 3)) {
            recommendations.push({
                priority: bottleneck.severity === 'critical' || bottleneck.severity === 'high' ? 'high' : 'medium',
                category: 'Optimization',
                issue: `${bottleneck.nodeName} (${bottleneck.nodeType}) taking ${bottleneck.impact.toFixed(1)}% of time`,
                recommendation: bottleneck.recommendation,
                expectedImprovement: `Up to ${(bottleneck.impact * 0.5).toFixed(0)}% performance improvement`,
            });
        }
        // Memory recommendations
        if (metrics.memory && metrics.memory.peak > 100 * 1024 * 1024) {
            recommendations.push({
                priority: 'medium',
                category: 'Memory',
                issue: `High peak memory usage: ${(metrics.memory.peak / 1024 / 1024).toFixed(1)}MB`,
                recommendation: 'Process data in batches, avoid large in-memory operations',
                expectedImprovement: 'Reduced memory footprint and improved stability',
            });
        }
        return recommendations;
    }
    /**
     * Evaluate metrics against optional target thresholds.
     * Returns true when no targets are given or all targets are met.
     */
    evaluateTargetMetrics(metrics, targets) {
        if (!targets)
            return true;
        if (targets.maxDuration && metrics.timing.mean > targets.maxDuration) {
            return false;
        }
        if (targets.maxP95 && metrics.timing.p95 > targets.maxP95) {
            return false;
        }
        if (targets.maxMemory && metrics.memory?.peak && metrics.memory.peak > targets.maxMemory) {
            return false;
        }
        return true;
    }
    /**
     * Get stored baseline for a workflow, or undefined if none recorded.
     */
    getBaseline(workflowId) {
        return this.baselines.get(workflowId);
    }
    /**
     * Set baseline from external source (e.g. loaded from persistence).
     */
    setBaseline(workflowId, baseline) {
        this.baselines.set(workflowId, baseline);
    }
}
|
|
652
|
+
exports.N8nPerformanceTesterAgent = N8nPerformanceTesterAgent;
|
|
653
|
+
//# sourceMappingURL=N8nPerformanceTesterAgent.js.map
|