express-api-stress-tester 2.0.0 → 2.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,8 +14,10 @@
14
14
  - 🔀 **Distributed architecture** — master/worker TCP coordination for horizontal scaling
15
15
  - 📊 **Real-time terminal dashboard** — live RPS, latency, error rate, CPU, and memory graphs
16
16
  - 🎯 **Multi-route testing** — test multiple endpoints with weighted traffic distribution
17
+ - 🧭 **Per-endpoint analytics** — separate metrics per API route
17
18
  - 🎭 **Scenario testing** — simulate real user flows across sequential API calls
18
19
  - 📈 **Advanced metrics** — P95, P99, min, max latency with reservoir sampling
20
+ - 📶 **Adaptive load engine** — ramp-up, ramp-down, target RPS, burst traffic
19
21
  - ๐Ÿ“ **Multi-format reports** โ€” TXT, JSON, and HTML reports with pass/fail status
20
22
  - 🔌 **Plugin system** — payload generators, auth providers, header providers, interceptors, and custom metrics collectors
21
23
  - 🧪 **Express integration** — auto-detect routes and stress test Express apps directly
@@ -142,11 +144,19 @@ console.log(result);
142
144
  | `payloads` | array | — | Bulk payloads — array of objects distributed round-robin |
143
145
  | `payloadFile` | string | — | Path to a CSV or JSON dataset file |
144
146
  | `concurrency` | number | `1` | Number of concurrent virtual users |
147
+ | `maxUsers` | number | — | Alias for `concurrency` when using adaptive ramping |
148
+ | `startConcurrency` | number | `1` | Initial concurrency for ramp-up |
149
+ | `rampUp` | number | `0` | Ramp-up time in seconds (linear) |
150
+ | `rampDown` | number | `0` | Ramp-down time in seconds (linear) |
151
+ | `targetRPS` | number | — | Adaptive target requests/sec |
152
+ | `adaptiveIntervalMs` | number | `1000` | Minimum interval between adaptive adjustments (ms) |
153
+ | `burst` | object | — | Burst traffic config: `{ start, duration, multiplier, maxUsers }` |
145
154
  | `duration` | number | `10` | Test duration in seconds |
146
155
  | `routes` | array | — | Array of route objects for multi-route testing |
147
156
  | `trafficDistribution` | array | — | Weighted traffic distribution across routes |
148
157
  | `scenarios` | array | — | Scenario definitions for user flow simulation |
149
158
  | `thresholds` | object | — | Pass/fail thresholds |
159
+ | `plugins` | array | — | Plugin module paths to load in worker threads |
150
160
 
151
161
  ### Single URL Config
152
162
 
@@ -203,6 +213,22 @@ Test multiple API endpoints simultaneously with optional weighted traffic distri
203
213
 
204
214
  ---
205
215
 
216
+ ## Per-Endpoint Metrics
217
+
218
+ When multiple routes or scenarios are tested, the report and dashboard include **separate metrics per API endpoint** (RPS, latency, error rate).
219
+
220
+ Example snippet from the TXT report:
221
+
222
+ ```
223
+ Per-Endpoint Metrics:
224
+ Endpoint RPS Avg(ms) P95(ms) Errors(%)
225
+ ----------------------------------------------------------------------------
226
+ GET /login 5200 120 210 0.5
227
+ POST /orders 3100 210 410 1.3
228
+ ```
229
+
230
+ ---
231
+
206
232
  ## Scenario Testing
207
233
 
208
234
  Simulate real user flows by defining sequential steps that execute in order.
@@ -234,6 +260,25 @@ Each virtual user executes the steps sequentially, simulating a realistic browsi
234
260
 
235
261
  ---
236
262
 
263
+ ## Adaptive Load & Burst Traffic
264
+
265
+ Use ramp-up, ramp-down, target RPS, and burst windows to shape traffic patterns.
266
+
267
+ ```json
268
+ {
269
+ "baseUrl": "https://api.example.com",
270
+ "routes": [{ "path": "/login", "method": "POST" }],
271
+ "maxUsers": 100000,
272
+ "startConcurrency": 1000,
273
+ "rampUp": 30,
274
+ "rampDown": 10,
275
+ "targetRPS": 50000,
276
+ "burst": { "start": 20, "duration": 5, "multiplier": 2 }
277
+ }
278
+ ```
279
+
280
+ ---
281
+
237
282
  ## Dynamic Payloads
238
283
 
239
284
  Use placeholders in your payload values. They are replaced with fresh random data for **every request**.
@@ -382,6 +427,16 @@ When thresholds are set, the summary `result` field returns `PASSED` or `FAILED`
382
427
 
383
428
  Scale horizontally across multiple machines using the built-in TCP-based master/worker coordination.
384
429
 
430
+ ### CLI Usage
431
+
432
+ ```bash
433
+ # Start master with config
434
+ npx express-api-stress-tester master config.json --port 7654 --workers 3
435
+
436
+ # Start workers (run on other machines)
437
+ npx express-api-stress-tester worker --host 127.0.0.1 --port 7654
438
+ ```
439
+
385
440
  ### Master Node
386
441
 
387
442
  ```js
@@ -547,6 +602,18 @@ const headers = authPlugins[0].handler();
547
602
  console.log(headers); // { Authorization: 'Bearer my-secret-token' }
548
603
  ```
549
604
 
605
+ ### Loading Plugins via Config
606
+
607
+ ```json
608
+ {
609
+ "url": "https://api.example.com/users",
610
+ "method": "GET",
611
+ "concurrency": 100,
612
+ "duration": 10,
613
+ "plugins": ["./plugins/authPlugin.js", "./plugins/requestLogger.js"]
614
+ }
615
+ ```
616
+
550
617
  ---
551
618
 
552
619
  ## Real-Time Dashboard
@@ -574,6 +641,7 @@ The dashboard displays:
574
641
  - Updates every 1 second
575
642
  - Color-coded indicators: 🟢 green (healthy), 🟡 yellow (warning), 🔴 red (critical)
576
643
  - 60-second RPS history with ASCII bar chart
644
+ - Per-endpoint table with live RPS, latency, and error rate
577
645
  - Clean exit on test completion
578
646
 
579
647
  ---
@@ -640,7 +708,15 @@ Result: PASSED
640
708
  "maxLatency": 320,
641
709
  "errorRate": 0.3,
642
710
  "successRate": 99.7,
643
- "result": "PASSED"
711
+ "result": "PASSED",
712
+ "perEndpoint": {
713
+ "GET /users": {
714
+ "requestsPerSec": 620,
715
+ "avgResponseTime": 35,
716
+ "p95": 80,
717
+ "errorRate": 0.2
718
+ }
719
+ }
644
720
  }
645
721
  }
646
722
  ```
@@ -652,6 +728,8 @@ The HTML report is a self-contained file with:
652
728
  - Status badge (✓ PASSED or ✗ FAILED)
653
729
  - Test configuration table
654
730
  - Results summary table with all metrics
731
+ - Latency, request rate, and error distribution charts
732
+ - Per-endpoint metrics table
655
733
  - Timestamp of generation
656
734
  - Embedded CSS — no external dependencies
657
735
 
@@ -690,6 +768,7 @@ express-api-stress-tester/
690
768
  │ │
691
769
  │ ├── metrics/
692
770
  │ │ ├── metricsCollector.js # Metrics aggregation + percentiles
771
+ │ │ ├── apiMetrics.js # Per-endpoint metrics collector
693
772
  โ”‚ โ”‚ โ””โ”€โ”€ systemMetrics.js # CPU & memory monitoring
694
773
  │ │
695
774
  │ ├── reporting/
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "express-api-stress-tester",
3
- "version": "2.0.0",
3
+ "version": "2.0.2",
4
4
  "description": "High-performance distributed API stress testing platform for Express.js APIs — simulate up to 10M concurrent virtual users",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
package/src/cli.js CHANGED
@@ -13,10 +13,12 @@ import { createRequire } from 'node:module';
13
13
  import { Command } from 'commander';
14
14
  import chalk from 'chalk';
15
15
  import { runStressTest } from './core/runner.js';
16
- import { CliDashboard } from './dashboard/cliDashboard.js';
16
+ import { MasterNode, WorkerNode } from './core/distributedCoordinator.js';
17
+ import { ReportWriter } from './reporting/reportWriter.js';
17
18
 
18
19
  const require = createRequire(import.meta.url);
19
20
  const pkg = require('../package.json');
21
+ const DEFAULT_MAX_ERROR_RATE_PERCENT = 5;
20
22
 
21
23
  function loadConfig(configPath) {
22
24
  const fullPath = resolve(configPath);
@@ -51,12 +53,6 @@ function printSummary(summary) {
51
53
  async function runCommand(configPath, opts) {
52
54
  const config = loadConfig(configPath);
53
55
 
54
- let dashboard = null;
55
- if (opts.dashboard) {
56
- dashboard = new CliDashboard();
57
- dashboard.start();
58
- }
59
-
60
56
  try {
61
57
  const summary = await runStressTest(config, {
62
58
  reportPath: opts.output || 'stress-test-report.txt',
@@ -64,19 +60,95 @@ async function runCommand(configPath, opts) {
64
60
  dashboard: opts.dashboard,
65
61
  });
66
62
 
67
- if (dashboard) {
68
- dashboard.stop();
69
- }
70
-
71
63
  printSummary(summary);
72
64
  process.exit(summary.result === 'PASSED' ? 0 : 1);
73
65
  } catch (err) {
74
- if (dashboard) dashboard.stop();
75
66
  console.error(chalk.red(`Stress test failed: ${err.message}`));
76
67
  process.exit(1);
77
68
  }
78
69
  }
79
70
 
71
+ async function masterCommand(configPath, opts) {
72
+ const config = loadConfig(configPath);
73
+ const master = new MasterNode({ port: Number(opts.port) || 7654 });
74
+ await master.start();
75
+ console.log(chalk.green(`Master listening on port ${master.port}`));
76
+
77
+ const expected = Number(opts.workers || config.distributed?.workers || 0);
78
+ if (expected > 0) {
79
+ await waitForWorkers(master, expected, opts.timeout ? Number(opts.timeout) * 1000 : 60_000);
80
+ }
81
+
82
+ const results = await master.distributeWork(config);
83
+ const summary = await master.collectResults(results);
84
+ summary.result = applyThresholds(summary, config.thresholds);
85
+
86
+ const reportPath = opts.output || 'stress-test-report.txt';
87
+ const reportFormat = opts.format || 'txt';
88
+ const writer = new ReportWriter(config, summary);
89
+ writer.write(reportPath, reportFormat);
90
+
91
+ printSummary(summary);
92
+ await master.stop();
93
+ process.exit(summary.result === 'PASSED' ? 0 : 1);
94
+ }
95
+
96
+ async function workerCommand(opts) {
97
+ const worker = new WorkerNode({
98
+ masterHost: opts.host || '127.0.0.1',
99
+ masterPort: Number(opts.port) || 7654,
100
+ });
101
+ await worker.connect();
102
+ console.log(chalk.green(`Worker connected to ${worker.masterHost}:${worker.masterPort}`));
103
+ await new Promise((resolve) => {
104
+ if (worker.socket) {
105
+ worker.socket.on('close', resolve);
106
+ } else {
107
+ resolve();
108
+ }
109
+ });
110
+ process.exit(0);
111
+ }
112
+
113
+ async function waitForWorkers(master, count, timeoutMs) {
114
+ const start = Date.now();
115
+ while (master.workers.size < count) {
116
+ if (timeoutMs && Date.now() - start > timeoutMs) {
117
+ throw new Error(`Timed out waiting for ${count} workers to connect`);
118
+ }
119
+ await new Promise((resolve) => setTimeout(resolve, 500));
120
+ }
121
+ }
122
+
123
+ function applyThresholds(summary, thresholds) {
124
+ if (!thresholds) {
125
+ return summary.errorRate < DEFAULT_MAX_ERROR_RATE_PERCENT ? 'PASSED' : 'FAILED';
126
+ }
127
+
128
+ if (
129
+ thresholds.maxErrorRate != null &&
130
+ summary.errorRate > thresholds.maxErrorRate
131
+ ) {
132
+ return 'FAILED';
133
+ }
134
+
135
+ if (
136
+ thresholds.maxAvgLatency != null &&
137
+ summary.avgResponseTime > thresholds.maxAvgLatency
138
+ ) {
139
+ return 'FAILED';
140
+ }
141
+
142
+ if (
143
+ thresholds.minRPS != null &&
144
+ summary.requestsPerSec < thresholds.minRPS
145
+ ) {
146
+ return 'FAILED';
147
+ }
148
+
149
+ return 'PASSED';
150
+ }
151
+
80
152
  async function main() {
81
153
  const program = new Command();
82
154
 
@@ -93,9 +165,35 @@ async function main() {
93
165
  .option('--output <path>', 'Report output file path')
94
166
  .action(runCommand);
95
167
 
168
+ program
169
+ .command('master <config>')
170
+ .description('Run a distributed master and coordinate connected workers')
171
+ .option('--port <port>', 'Master listen port', '7654')
172
+ .option('--workers <count>', 'Number of workers to wait for')
173
+ .option('--timeout <seconds>', 'Wait timeout for workers', '60')
174
+ .option('--format <format>', 'Report format: txt, json, html', 'txt')
175
+ .option('--output <path>', 'Report output file path')
176
+ .action(masterCommand);
177
+
178
+ program
179
+ .command('worker')
180
+ .description('Start a worker node and connect to a master')
181
+ .option('--host <host>', 'Master host', '127.0.0.1')
182
+ .option('--port <port>', 'Master port', '7654')
183
+ .action(workerCommand);
184
+
96
185
  // Backward compatibility: if first arg is not a known command, treat as config path
97
186
  const args = process.argv.slice(2);
98
- const knownCommands = ['run', 'help', '--help', '-h', '--version', '-V'];
187
+ const knownCommands = [
188
+ 'run',
189
+ 'master',
190
+ 'worker',
191
+ 'help',
192
+ '--help',
193
+ '-h',
194
+ '--version',
195
+ '-V',
196
+ ];
99
197
  if (args.length > 0 && !knownCommands.includes(args[0])) {
100
198
  // Rewrite argv to include 'run' subcommand
101
199
  const configArg = args[0];
@@ -150,6 +150,7 @@ export class MasterNode {
150
150
  errorRate: 0,
151
151
  successRate: 0,
152
152
  result: 'PASSED',
153
+ perEndpoint: {},
153
154
  };
154
155
 
155
156
  let totalResponseTimeWeighted = 0;
@@ -158,6 +159,9 @@ export class MasterNode {
158
159
  combined.totalRequests += r.totalRequests || 0;
159
160
  combined.requestsPerSec += r.requestsPerSec || 0;
160
161
  totalResponseTimeWeighted += (r.avgResponseTime || 0) * (r.totalRequests || 0);
162
+ if (r.perEndpoint) {
163
+ mergeEndpointSummaries(combined.perEndpoint, r.perEndpoint);
164
+ }
161
165
  }
162
166
 
163
167
  if (combined.totalRequests > 0) {
@@ -307,3 +311,51 @@ export class WorkerNode {
307
311
  });
308
312
  }
309
313
  }
314
+
315
+ function mergeEndpointSummaries(target, source) {
316
+ for (const [endpoint, metrics] of Object.entries(source)) {
317
+ if (!target[endpoint]) {
318
+ target[endpoint] = {
319
+ totalRequests: 0,
320
+ requestsPerSec: 0,
321
+ avgResponseTime: 0,
322
+ errorRate: 0,
323
+ successRate: 0,
324
+ p95: 0,
325
+ p99: 0,
326
+ minLatency: metrics.minLatency ?? 0,
327
+ maxLatency: metrics.maxLatency ?? 0,
328
+ };
329
+ }
330
+
331
+ const current = target[endpoint];
332
+ const totalBefore = current.totalRequests;
333
+ const totalAfter = totalBefore + (metrics.totalRequests || 0);
334
+
335
+ current.totalRequests = totalAfter;
336
+ current.requestsPerSec += metrics.requestsPerSec || 0;
337
+ current.avgResponseTime =
338
+ totalAfter > 0
339
+ ? Math.round(
340
+ ((current.avgResponseTime || 0) * totalBefore +
341
+ (metrics.avgResponseTime || 0) * (metrics.totalRequests || 0)) / totalAfter,
342
+ )
343
+ : 0;
344
+ current.errorRate =
345
+ totalAfter > 0
346
+ ? parseFloat(
347
+ (
348
+ (((current.errorRate || 0) / 100) * totalBefore +
349
+ ((metrics.errorRate || 0) / 100) * (metrics.totalRequests || 0)) /
350
+ totalAfter *
351
+ 100
352
+ ).toFixed(1),
353
+ )
354
+ : 0;
355
+ current.successRate = parseFloat((100 - current.errorRate).toFixed(1));
356
+ current.p95 = Math.max(current.p95 || 0, metrics.p95 || 0);
357
+ current.p99 = Math.max(current.p99 || 0, metrics.p99 || 0);
358
+ current.minLatency = Math.min(current.minLatency ?? Infinity, metrics.minLatency ?? Infinity);
359
+ current.maxLatency = Math.max(current.maxLatency ?? 0, metrics.maxLatency ?? 0);
360
+ }
361
+ }
@@ -6,6 +6,9 @@
6
6
  */
7
7
  import { Pool } from 'undici';
8
8
 
9
+ const CONTROL_CHARS_REGEX = /[\0\r\n]/g;
10
+ const MAX_WARNED_HEADER_VALUES = 100;
11
+
9
12
  export class HttpEngine {
10
13
  /**
11
14
  * @param {object} options
@@ -31,6 +34,7 @@ export class HttpEngine {
31
34
  this.baseUrl = baseUrl;
32
35
  this.defaultHeaders = headers;
33
36
  this.timeout = timeout;
37
+ this.invalidHeaderWarningCache = { map: new Map(), queue: [] };
34
38
 
35
39
  this.pool = new Pool(baseUrl, {
36
40
  connections,
@@ -53,7 +57,10 @@ export class HttpEngine {
53
57
  * @returns {Promise<{ statusCode: number, headers: object, body: string, responseTime: number }>}
54
58
  */
55
59
  async request({ method = 'GET', path = '/', headers = {}, body = null } = {}) {
56
- const mergedHeaders = { ...this.defaultHeaders, ...headers };
60
+ const mergedHeaders = normalizeHeaders(
61
+ { ...this.defaultHeaders, ...headers },
62
+ this.invalidHeaderWarningCache,
63
+ );
57
64
 
58
65
  const start = process.hrtime.bigint();
59
66
 
@@ -88,3 +95,95 @@ export class HttpEngine {
88
95
  await this.pool.close();
89
96
  }
90
97
  }
98
+
99
+ function normalizeHeaders(headers, warningCache) {
100
+ const normalized = {};
101
+ for (const [key, value] of Object.entries(headers || {})) {
102
+ if (value === undefined || value === null) {
103
+ continue;
104
+ }
105
+ const normalizedKey = normalizeHeaderKey(key);
106
+ if (normalizedKey === null) {
107
+ continue;
108
+ }
109
+ if (Array.isArray(value)) {
110
+ const cleaned = [];
111
+ for (const entry of value) {
112
+ if (entry === undefined || entry === null) {
113
+ continue;
114
+ }
115
+ if (isValidHeaderValue(entry)) {
116
+ const cleanedEntry = normalizeHeaderValue(entry);
117
+ if (cleanedEntry !== null) {
118
+ cleaned.push(cleanedEntry);
119
+ }
120
+ } else {
121
+ warnInvalidHeaderValue(normalizedKey, entry, warningCache);
122
+ }
123
+ }
124
+ if (cleaned.length > 0) {
125
+ normalized[normalizedKey] = cleaned;
126
+ }
127
+ continue;
128
+ }
129
+
130
+ if (!isValidHeaderValue(value)) {
131
+ warnInvalidHeaderValue(normalizedKey, value, warningCache);
132
+ continue;
133
+ }
134
+
135
+ const cleanedValue = normalizeHeaderValue(value);
136
+ if (cleanedValue === null) {
137
+ continue;
138
+ }
139
+ normalized[normalizedKey] = cleanedValue;
140
+ }
141
+ return normalized;
142
+ }
143
+
144
+ function normalizeHeaderKey(value) {
145
+ if (typeof value !== 'string') {
146
+ return null;
147
+ }
148
+ const cleaned = value.replace(CONTROL_CHARS_REGEX, '');
149
+ return cleaned.length > 0 ? cleaned : null;
150
+ }
151
+
152
+ function normalizeHeaderValue(value) {
153
+ if (typeof value === 'string') {
154
+ const cleaned = value.replace(CONTROL_CHARS_REGEX, '');
155
+ return cleaned.length > 0 ? cleaned : null;
156
+ }
157
+ if (typeof value === 'number' || typeof value === 'boolean') {
158
+ return String(value);
159
+ }
160
+ return null;
161
+ }
162
+
163
+ function isValidHeaderValue(value) {
164
+ return (
165
+ typeof value === 'string' ||
166
+ typeof value === 'number' ||
167
+ typeof value === 'boolean'
168
+ );
169
+ }
170
+
171
+ function warnInvalidHeaderValue(key, value, warningCache) {
172
+ const type = typeof value;
173
+ const safeKey = key;
174
+ const signature = `${safeKey}::${type}`;
175
+ if (warningCache.map.has(signature)) {
176
+ return;
177
+ }
178
+ warningCache.map.set(signature, true);
179
+ warningCache.queue.push(signature);
180
+ if (warningCache.queue.length > MAX_WARNED_HEADER_VALUES) {
181
+ const oldest = warningCache.queue.shift();
182
+ if (oldest) {
183
+ warningCache.map.delete(oldest);
184
+ }
185
+ }
186
+ process.stderr.write(
187
+ `[HttpEngine] Dropping header "${safeKey}" with unsupported value type "${type}".\n`,
188
+ );
189
+ }