@curl-runner/cli 1.13.0 → 1.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@curl-runner/cli",
3
- "version": "1.13.0",
3
+ "version": "1.14.0",
4
4
  "description": "A powerful CLI tool for HTTP request management using YAML configuration",
5
5
  "type": "module",
6
6
  "main": "./dist/cli.js",
package/src/cli.ts CHANGED
@@ -1,16 +1,19 @@
1
1
  #!/usr/bin/env bun
2
2
 
3
3
  import { Glob } from 'bun';
4
+ import { ProfileExecutor } from './executor/profile-executor';
4
5
  import { RequestExecutor } from './executor/request-executor';
5
6
  import { YamlParser } from './parser/yaml';
6
7
  import type {
7
8
  ExecutionResult,
8
9
  ExecutionSummary,
9
10
  GlobalConfig,
11
+ ProfileConfig,
10
12
  RequestConfig,
11
13
  WatchConfig,
12
14
  } from './types/config';
13
15
  import { Logger } from './utils/logger';
16
+ import { exportToCSV, exportToJSON } from './utils/stats';
14
17
  import { VersionChecker } from './utils/version-checker';
15
18
  import { getVersion } from './version';
16
19
  import { FileWatcher } from './watcher/file-watcher';
@@ -166,6 +169,49 @@ class CurlRunnerCLI {
166
169
  };
167
170
  }
168
171
 
172
+ // Profile mode configuration
173
+ if (process.env.CURL_RUNNER_PROFILE) {
174
+ const iterations = Number.parseInt(process.env.CURL_RUNNER_PROFILE, 10);
175
+ if (iterations > 0) {
176
+ envConfig.profile = {
177
+ ...envConfig.profile,
178
+ iterations,
179
+ };
180
+ }
181
+ }
182
+
183
+ if (process.env.CURL_RUNNER_PROFILE_WARMUP) {
184
+ envConfig.profile = {
185
+ ...envConfig.profile,
186
+ iterations: envConfig.profile?.iterations ?? 10,
187
+ warmup: Number.parseInt(process.env.CURL_RUNNER_PROFILE_WARMUP, 10),
188
+ };
189
+ }
190
+
191
+ if (process.env.CURL_RUNNER_PROFILE_CONCURRENCY) {
192
+ envConfig.profile = {
193
+ ...envConfig.profile,
194
+ iterations: envConfig.profile?.iterations ?? 10,
195
+ concurrency: Number.parseInt(process.env.CURL_RUNNER_PROFILE_CONCURRENCY, 10),
196
+ };
197
+ }
198
+
199
+ if (process.env.CURL_RUNNER_PROFILE_HISTOGRAM) {
200
+ envConfig.profile = {
201
+ ...envConfig.profile,
202
+ iterations: envConfig.profile?.iterations ?? 10,
203
+ histogram: process.env.CURL_RUNNER_PROFILE_HISTOGRAM.toLowerCase() === 'true',
204
+ };
205
+ }
206
+
207
+ if (process.env.CURL_RUNNER_PROFILE_EXPORT) {
208
+ envConfig.profile = {
209
+ ...envConfig.profile,
210
+ iterations: envConfig.profile?.iterations ?? 10,
211
+ exportFile: process.env.CURL_RUNNER_PROFILE_EXPORT,
212
+ };
213
+ }
214
+
169
215
  // Snapshot configuration
170
216
  if (process.env.CURL_RUNNER_SNAPSHOT) {
171
217
  envConfig.snapshot = {
@@ -378,7 +424,36 @@ class CurlRunnerCLI {
378
424
  // Check if watch mode is enabled
379
425
  const watchEnabled = options.watch || globalConfig.watch?.enabled;
380
426
 
381
- if (watchEnabled) {
427
+ // Check if profile mode is enabled (mutually exclusive with watch mode)
428
+ const profileIterations =
429
+ (options.profile as number | undefined) ?? globalConfig.profile?.iterations;
430
+ const profileEnabled = profileIterations && profileIterations > 0;
431
+
432
+ if (watchEnabled && profileEnabled) {
433
+ this.logger.logError('Profile mode and watch mode cannot be used together');
434
+ process.exit(1);
435
+ }
436
+
437
+ if (profileEnabled) {
438
+ // Profile mode - run requests multiple times for latency stats
439
+ const profileConfig: ProfileConfig = {
440
+ iterations: profileIterations,
441
+ warmup:
442
+ (options.profileWarmup as number | undefined) ?? globalConfig.profile?.warmup ?? 1,
443
+ concurrency:
444
+ (options.profileConcurrency as number | undefined) ??
445
+ globalConfig.profile?.concurrency ??
446
+ 1,
447
+ histogram:
448
+ (options.profileHistogram as boolean | undefined) ??
449
+ globalConfig.profile?.histogram ??
450
+ false,
451
+ exportFile:
452
+ (options.profileExport as string | undefined) ?? globalConfig.profile?.exportFile,
453
+ };
454
+
455
+ await this.executeProfileMode(allRequests, globalConfig, profileConfig);
456
+ } else if (watchEnabled) {
382
457
  // Build watch config from options and global config
383
458
  const watchConfig: WatchConfig = {
384
459
  enabled: true,
@@ -409,6 +484,49 @@ class CurlRunnerCLI {
409
484
  }
410
485
  }
411
486
 
487
+ private async executeProfileMode(
488
+ requests: RequestConfig[],
489
+ globalConfig: GlobalConfig,
490
+ profileConfig: ProfileConfig,
491
+ ): Promise<void> {
492
+ const profileExecutor = new ProfileExecutor(globalConfig, profileConfig);
493
+ const results = await profileExecutor.profileRequests(requests);
494
+
495
+ this.logger.logProfileSummary(results);
496
+
497
+ // Export results if requested
498
+ if (profileConfig.exportFile) {
499
+ const exportData: string[] = [];
500
+ const isCSV = profileConfig.exportFile.endsWith('.csv');
501
+
502
+ for (const result of results) {
503
+ const name = result.request.name || result.request.url;
504
+ if (isCSV) {
505
+ exportData.push(exportToCSV(result.stats, name));
506
+ } else {
507
+ exportData.push(exportToJSON(result.stats, name));
508
+ }
509
+ }
510
+
511
+ const content = isCSV ? exportData.join('\n\n') : `[${exportData.join(',\n')}]`;
512
+ await Bun.write(profileConfig.exportFile, content);
513
+ this.logger.logInfo(`Profile results exported to ${profileConfig.exportFile}`);
514
+ }
515
+
516
+ // Exit with code 1 if failure rate is high
517
+ const totalFailures = results.reduce((sum, r) => sum + r.stats.failures, 0);
518
+ const totalIterations = results.reduce(
519
+ (sum, r) => sum + r.stats.iterations + r.stats.warmup,
520
+ 0,
521
+ );
522
+
523
+ if (totalFailures > 0 && totalFailures / totalIterations > 0.5) {
524
+ process.exit(1);
525
+ }
526
+
527
+ process.exit(0);
528
+ }
529
+
412
530
  private async executeRequests(
413
531
  yamlFiles: string[],
414
532
  globalConfig: GlobalConfig,
@@ -509,6 +627,8 @@ class CurlRunnerCLI {
509
627
  options.watchClear = true;
510
628
  } else if (key === 'no-watch-clear') {
511
629
  options.watchClear = false;
630
+ } else if (key === 'profile-histogram') {
631
+ options.profileHistogram = true;
512
632
  } else if (key === 'snapshot') {
513
633
  options.snapshot = true;
514
634
  } else if (key === 'update-snapshots') {
@@ -550,6 +670,14 @@ class CurlRunnerCLI {
550
670
  }
551
671
  } else if (key === 'watch-debounce') {
552
672
  options.watchDebounce = Number.parseInt(nextArg, 10);
673
+ } else if (key === 'profile') {
674
+ options.profile = Number.parseInt(nextArg, 10);
675
+ } else if (key === 'profile-warmup') {
676
+ options.profileWarmup = Number.parseInt(nextArg, 10);
677
+ } else if (key === 'profile-concurrency') {
678
+ options.profileConcurrency = Number.parseInt(nextArg, 10);
679
+ } else if (key === 'profile-export') {
680
+ options.profileExport = nextArg;
553
681
  } else if (key === 'snapshot-dir') {
554
682
  options.snapshotDir = nextArg;
555
683
  } else {
@@ -596,6 +724,15 @@ class CurlRunnerCLI {
596
724
  }
597
725
  break;
598
726
  }
727
+ case 'P': {
728
+ // Handle -P flag for profile mode
729
+ const profileArg = args[i + 1];
730
+ if (profileArg && !profileArg.startsWith('-')) {
731
+ options.profile = Number.parseInt(profileArg, 10);
732
+ i++;
733
+ }
734
+ break;
735
+ }
599
736
  }
600
737
  }
601
738
  } else {
@@ -804,6 +941,13 @@ ${this.logger.color('WATCH MODE:', 'yellow')}
804
941
  --watch-debounce <ms> Debounce delay for watch mode (default: 300)
805
942
  --no-watch-clear Don't clear screen between watch runs
806
943
 
944
+ ${this.logger.color('PROFILE MODE:', 'yellow')}
945
+ -P, --profile <n> Run each request N times for latency stats
946
+ --profile-warmup <n> Warmup iterations to exclude from stats (default: 1)
947
+ --profile-concurrency <n> Concurrent iterations (default: 1 = sequential)
948
+ --profile-histogram Show ASCII histogram of latency distribution
949
+ --profile-export <file> Export raw timings to file (.json or .csv)
950
+
807
951
  ${this.logger.color('CI/CD OPTIONS:', 'yellow')}
808
952
  --strict-exit Exit with code 1 if any validation fails (for CI/CD)
809
953
  --fail-on <count> Exit with code 1 if failures exceed this count
@@ -865,6 +1009,15 @@ ${this.logger.color('EXAMPLES:', 'yellow')}
865
1009
  # Watch with custom debounce
866
1010
  curl-runner tests/ -w --watch-debounce 500
867
1011
 
1012
+ # Profile mode - run request 100 times for latency stats
1013
+ curl-runner api.yaml -P 100
1014
+
1015
+ # Profile with 5 warmup iterations and histogram
1016
+ curl-runner api.yaml --profile 50 --profile-warmup 5 --profile-histogram
1017
+
1018
+ # Profile with concurrent iterations and export
1019
+ curl-runner api.yaml -P 100 --profile-concurrency 10 --profile-export results.json
1020
+
868
1021
  # Snapshot testing - save and compare responses
869
1022
  curl-runner api.yaml --snapshot
870
1023
 
@@ -0,0 +1,132 @@
1
+ import { describe, expect, test } from 'bun:test';
2
+ import type { ProfileConfig } from '../types/config';
3
+ import { calculateProfileStats } from '../utils/stats';
4
+
5
+ /**
6
+ * Test the profile stats calculation logic used by ProfileExecutor.
7
+ * Actual HTTP execution is tested via integration tests.
8
+ */
9
+
10
+ describe('Profile Stats Integration', () => {
11
+ test('stats calculation matches expected ProfileResult structure', () => {
12
+ const timings = [50, 55, 60, 65, 70, 75, 80, 85, 90, 95];
13
+ const warmup = 2;
14
+ const failures = 0;
15
+
16
+ const stats = calculateProfileStats(timings, warmup, failures);
17
+
18
+ // Verify all ProfileStats fields are present
19
+ expect(stats).toHaveProperty('iterations');
20
+ expect(stats).toHaveProperty('warmup');
21
+ expect(stats).toHaveProperty('min');
22
+ expect(stats).toHaveProperty('max');
23
+ expect(stats).toHaveProperty('mean');
24
+ expect(stats).toHaveProperty('median');
25
+ expect(stats).toHaveProperty('p50');
26
+ expect(stats).toHaveProperty('p95');
27
+ expect(stats).toHaveProperty('p99');
28
+ expect(stats).toHaveProperty('stdDev');
29
+ expect(stats).toHaveProperty('failures');
30
+ expect(stats).toHaveProperty('failureRate');
31
+ expect(stats).toHaveProperty('timings');
32
+
33
+ // Verify warmup exclusion
34
+ expect(stats.iterations).toBe(8); // 10 - 2 warmup
35
+ expect(stats.warmup).toBe(2);
36
+ expect(stats.timings.length).toBe(8);
37
+ });
38
+
39
+ test('profile config defaults are applied correctly', () => {
40
+ const defaultConfig: ProfileConfig = {
41
+ iterations: 10,
42
+ warmup: 1,
43
+ concurrency: 1,
44
+ histogram: false,
45
+ };
46
+
47
+ expect(defaultConfig.iterations).toBe(10);
48
+ expect(defaultConfig.warmup).toBe(1);
49
+ expect(defaultConfig.concurrency).toBe(1);
50
+ expect(defaultConfig.histogram).toBe(false);
51
+ });
52
+
53
+ test('concurrent config changes iterations behavior', () => {
54
+ const _sequentialConfig: ProfileConfig = {
55
+ iterations: 100,
56
+ concurrency: 1,
57
+ };
58
+
59
+ const concurrentConfig: ProfileConfig = {
60
+ iterations: 100,
61
+ concurrency: 10,
62
+ };
63
+
64
+ // 100 iterations at concurrency 10 execute as 10 sequential batches of 10 parallel requests
65
+ const expectedChunks = Math.ceil(concurrentConfig.iterations / concurrentConfig.concurrency!);
66
+ expect(expectedChunks).toBe(10);
67
+ });
68
+
69
+ test('failure tracking affects failureRate calculation', () => {
70
+ const timings = [10, 20, 30, 40, 50];
71
+ const failures = 2;
72
+
73
+ const stats = calculateProfileStats(timings, 0, failures);
74
+
75
+ // failureRate is computed as failures / timings.length = 2/5 = 40%
76
+ // (failures are tracked separately and are not included in the timings array)
77
+ expect(stats.failures).toBe(2);
78
+ expect(stats.failureRate).toBeGreaterThan(0);
79
+ });
80
+
81
+ test('warmup iterations are excluded from percentile calculations', () => {
82
+ // First 2 values are outlier warmup times
83
+ const timings = [500, 400, 100, 100, 100, 100, 100, 100, 100, 100];
84
+ const warmup = 2;
85
+
86
+ const stats = calculateProfileStats(timings, warmup, 0);
87
+
88
+ // After excluding warmup, all values are 100
89
+ expect(stats.min).toBe(100);
90
+ expect(stats.max).toBe(100);
91
+ expect(stats.mean).toBe(100);
92
+ expect(stats.p50).toBe(100);
93
+ expect(stats.p95).toBe(100);
94
+ expect(stats.p99).toBe(100);
95
+ });
96
+
97
+ test('export file extension determines format', () => {
98
+ const jsonFile = 'results.json';
99
+ const csvFile = 'results.csv';
100
+
101
+ expect(jsonFile.endsWith('.json')).toBe(true);
102
+ expect(csvFile.endsWith('.csv')).toBe(true);
103
+ expect(jsonFile.endsWith('.csv')).toBe(false);
104
+ expect(csvFile.endsWith('.json')).toBe(false);
105
+ });
106
+ });
107
+
108
+ describe('ProfileConfig Validation', () => {
109
+ test('iterations must be positive', () => {
110
+ const validConfig: ProfileConfig = { iterations: 10 };
111
+ const invalidIterations = 0;
112
+
113
+ expect(validConfig.iterations).toBeGreaterThan(0);
114
+ expect(invalidIterations).toBeLessThanOrEqual(0);
115
+ });
116
+
117
+ test('warmup should not exceed iterations', () => {
118
+ const config: ProfileConfig = {
119
+ iterations: 10,
120
+ warmup: 5,
121
+ };
122
+
123
+ expect(config.warmup).toBeLessThanOrEqual(config.iterations);
124
+ });
125
+
126
+ test('concurrency defaults to 1 (sequential)', () => {
127
+ const config: ProfileConfig = { iterations: 10 };
128
+ const concurrency = config.concurrency ?? 1;
129
+
130
+ expect(concurrency).toBe(1);
131
+ });
132
+ });
@@ -0,0 +1,167 @@
1
+ import type {
2
+ ExecutionResult,
3
+ GlobalConfig,
4
+ ProfileConfig,
5
+ ProfileResult,
6
+ RequestConfig,
7
+ } from '../types/config';
8
+ import { CurlBuilder } from '../utils/curl-builder';
9
+ import { Logger } from '../utils/logger';
10
+ import { calculateProfileStats } from '../utils/stats';
11
+
12
+ export class ProfileExecutor {
13
+ private logger: Logger;
14
+ private profileConfig: ProfileConfig;
15
+
16
+ constructor(globalConfig: GlobalConfig, profileConfig: ProfileConfig) {
17
+ this.profileConfig = profileConfig;
18
+ this.logger = new Logger(globalConfig.output);
19
+ }
20
+
21
+ /**
22
+ * Execute a single iteration of the request (minimal overhead version).
23
+ * Skips logging and validation for accurate timing.
24
+ */
25
+ private async executeSingleIteration(config: RequestConfig): Promise<ExecutionResult> {
26
+ const startTime = performance.now();
27
+ const command = CurlBuilder.buildCommand(config);
28
+ const result = await CurlBuilder.executeCurl(command);
29
+
30
+ if (result.success) {
31
+ let body = result.body;
32
+ try {
33
+ if (
34
+ result.headers?.['content-type']?.includes('application/json') ||
35
+ (body && (body.trim().startsWith('{') || body.trim().startsWith('[')))
36
+ ) {
37
+ body = JSON.parse(body);
38
+ }
39
+ } catch (_e) {
40
+ // Keep raw body
41
+ }
42
+
43
+ return {
44
+ request: config,
45
+ success: true,
46
+ status: result.status,
47
+ headers: result.headers,
48
+ body,
49
+ metrics: {
50
+ ...result.metrics,
51
+ duration: performance.now() - startTime,
52
+ },
53
+ };
54
+ }
55
+
56
+ return {
57
+ request: config,
58
+ success: false,
59
+ error: result.error,
60
+ metrics: {
61
+ duration: performance.now() - startTime,
62
+ },
63
+ };
64
+ }
65
+
66
+ /**
67
+ * Execute iterations in chunks for controlled concurrency.
68
+ */
69
+ private async executeWithConcurrency(
70
+ config: RequestConfig,
71
+ iterations: number,
72
+ concurrency: number,
73
+ ): Promise<ExecutionResult[]> {
74
+ const results: ExecutionResult[] = [];
75
+
76
+ for (let i = 0; i < iterations; i += concurrency) {
77
+ const chunkSize = Math.min(concurrency, iterations - i);
78
+ const chunk = await Promise.all(
79
+ Array.from({ length: chunkSize }, () => this.executeSingleIteration(config)),
80
+ );
81
+ results.push(...chunk);
82
+ }
83
+
84
+ return results;
85
+ }
86
+
87
+ /**
88
+ * Profile a single request by running it multiple times.
89
+ */
90
+ async profileRequest(config: RequestConfig, index = 0): Promise<ProfileResult> {
91
+ const iterations = this.profileConfig.iterations;
92
+ const warmup = this.profileConfig.warmup ?? 1;
93
+ const concurrency = this.profileConfig.concurrency ?? 1;
94
+ const requestName = config.name || `Request ${index + 1}`;
95
+
96
+ this.logger.logProfileStart(requestName, iterations, warmup, concurrency);
97
+
98
+ const results =
99
+ concurrency === 1
100
+ ? await this.executeSequentially(config, iterations)
101
+ : await this.executeWithConcurrency(config, iterations, concurrency);
102
+
103
+ // Collect timings and count failures
104
+ const timings: number[] = [];
105
+ let failures = 0;
106
+
107
+ for (const result of results) {
108
+ if (result.success && result.metrics?.duration !== undefined) {
109
+ timings.push(result.metrics.duration);
110
+ } else {
111
+ failures++;
112
+ // Use 0 as placeholder for failed requests (excluded from stats)
113
+ timings.push(0);
114
+ }
115
+ }
116
+
117
+ // Filter out failed timings (0s) for stats calculation
118
+ const successfulTimings = timings
119
+ .map((t, i) => (results[i].success ? t : -1))
120
+ .filter((t) => t >= 0);
121
+
122
+ // Recalculate stats with only successful timings
123
+ const stats = calculateProfileStats(
124
+ successfulTimings,
125
+ Math.min(warmup, successfulTimings.length),
126
+ failures,
127
+ );
128
+
129
+ return {
130
+ request: config,
131
+ stats,
132
+ iterations: results,
133
+ };
134
+ }
135
+
136
+ /**
137
+ * Execute iterations sequentially (default behavior).
138
+ */
139
+ private async executeSequentially(
140
+ config: RequestConfig,
141
+ iterations: number,
142
+ ): Promise<ExecutionResult[]> {
143
+ const results: ExecutionResult[] = [];
144
+
145
+ for (let i = 0; i < iterations; i++) {
146
+ const result = await this.executeSingleIteration(config);
147
+ results.push(result);
148
+ }
149
+
150
+ return results;
151
+ }
152
+
153
+ /**
154
+ * Profile multiple requests.
155
+ */
156
+ async profileRequests(requests: RequestConfig[]): Promise<ProfileResult[]> {
157
+ const results: ProfileResult[] = [];
158
+
159
+ for (let i = 0; i < requests.length; i++) {
160
+ const result = await this.profileRequest(requests[i], i);
161
+ results.push(result);
162
+ this.logger.logProfileResult(result, this.profileConfig.histogram ?? false);
163
+ }
164
+
165
+ return results;
166
+ }
167
+ }
@@ -325,6 +325,11 @@ export interface GlobalConfig {
325
325
  * Automatically re-runs requests when YAML files change.
326
326
  */
327
327
  watch?: WatchConfig;
328
+ /**
329
+ * Performance profiling mode configuration.
330
+ * Runs requests multiple times to collect p50/p95/p99 latency stats.
331
+ */
332
+ profile?: ProfileConfig;
328
333
  /**
329
334
  * Snapshot testing configuration.
330
335
  * Saves response snapshots and compares future runs against them.
@@ -403,6 +408,65 @@ export interface WatchConfig {
403
408
  clear?: boolean;
404
409
  }
405
410
 
411
+ /**
412
+ * Configuration for performance profiling mode.
413
+ * Runs requests multiple times to collect latency statistics.
414
+ */
415
+ export interface ProfileConfig {
416
+ /** Number of iterations to run. Default: 10 */
417
+ iterations: number;
418
+ /** Number of warmup iterations to exclude from stats. Default: 1 */
419
+ warmup?: number;
420
+ /** Number of concurrent iterations. Default: 1 (sequential) */
421
+ concurrency?: number;
422
+ /** Show ASCII histogram in output. Default: false */
423
+ histogram?: boolean;
424
+ /** Export raw timings to file (JSON or CSV based on extension) */
425
+ exportFile?: string;
426
+ }
427
+
428
+ /**
429
+ * Statistics computed from profile run timings.
430
+ */
431
+ export interface ProfileStats {
432
+ /** Total iterations run (excluding warmup) */
433
+ iterations: number;
434
+ /** Warmup iterations excluded */
435
+ warmup: number;
436
+ /** Minimum latency in ms */
437
+ min: number;
438
+ /** Maximum latency in ms */
439
+ max: number;
440
+ /** Mean latency in ms */
441
+ mean: number;
442
+ /** Median latency in ms (same as p50) */
443
+ median: number;
444
+ /** 50th percentile latency in ms */
445
+ p50: number;
446
+ /** 95th percentile latency in ms */
447
+ p95: number;
448
+ /** 99th percentile latency in ms */
449
+ p99: number;
450
+ /** Standard deviation in ms */
451
+ stdDev: number;
452
+ /** Number of failed iterations */
453
+ failures: number;
454
+ /** Failure rate as percentage */
455
+ failureRate: number;
456
+ /** Raw timing values (for export) */
457
+ timings: number[];
458
+ }
459
+
460
+ /**
461
+ * Result of a profiled request execution.
462
+ */
463
+ export interface ProfileResult {
464
+ request: RequestConfig;
465
+ stats: ProfileStats;
466
+ /** Individual results from each iteration */
467
+ iterations: ExecutionResult[];
468
+ }
469
+
406
470
  /**
407
471
  * Configuration for snapshot testing.
408
472
  * Snapshots save response data and compare future runs against them.
@@ -3,8 +3,10 @@ import type {
3
3
  ExecutionResult,
4
4
  ExecutionSummary,
5
5
  GlobalConfig,
6
+ ProfileResult,
6
7
  RequestConfig,
7
8
  } from '../types/config';
9
+ import { generateHistogram } from './stats';
8
10
 
9
11
  interface TreeNode {
10
12
  label: string;
@@ -732,4 +734,123 @@ export class Logger {
732
734
  );
733
735
  console.log();
734
736
  }
737
+
738
+ logProfileStart(
739
+ requestName: string,
740
+ iterations: number,
741
+ warmup: number,
742
+ concurrency: number,
743
+ ): void {
744
+ if (!this.shouldShowOutput()) {
745
+ return;
746
+ }
747
+
748
+ console.log();
749
+ console.log(`${this.color('⚡ PROFILING', 'magenta')} ${this.color(requestName, 'bright')}`);
750
+ console.log(
751
+ this.color(
752
+ ` ${iterations} iterations, ${warmup} warmup, concurrency: ${concurrency}`,
753
+ 'dim',
754
+ ),
755
+ );
756
+ }
757
+
758
+ logProfileResult(result: ProfileResult, showHistogram: boolean): void {
759
+ const { stats, request } = result;
760
+ const name = request.name || request.url;
761
+
762
+ if (this.config.format === 'json') {
763
+ console.log(
764
+ JSON.stringify({
765
+ request: { name, url: request.url, method: request.method || 'GET' },
766
+ stats: {
767
+ iterations: stats.iterations,
768
+ warmup: stats.warmup,
769
+ failures: stats.failures,
770
+ failureRate: stats.failureRate,
771
+ min: stats.min,
772
+ max: stats.max,
773
+ mean: stats.mean,
774
+ median: stats.median,
775
+ p50: stats.p50,
776
+ p95: stats.p95,
777
+ p99: stats.p99,
778
+ stdDev: stats.stdDev,
779
+ },
780
+ }),
781
+ );
782
+ return;
783
+ }
784
+
785
+ if (this.config.format === 'raw') {
786
+ // Raw format: just print the key stats
787
+ console.log(`${stats.p50}\t${stats.p95}\t${stats.p99}\t${stats.mean}`);
788
+ return;
789
+ }
790
+
791
+ // Pretty format
792
+ console.log();
793
+ const statusIcon = stats.failures === 0 ? this.color('✓', 'green') : this.color('⚠', 'yellow');
794
+ console.log(`${statusIcon} ${this.color(name, 'bright')}`);
795
+
796
+ // Latency stats table
797
+ console.log(this.color(' ┌─────────────────────────────────────┐', 'dim'));
798
+ console.log(
799
+ ` │ ${this.color('p50', 'cyan')} ${this.formatLatency(stats.p50).padStart(10)} │ ${this.color('min', 'dim')} ${this.formatLatency(stats.min).padStart(10)} │`,
800
+ );
801
+ console.log(
802
+ ` │ ${this.color('p95', 'yellow')} ${this.formatLatency(stats.p95).padStart(10)} │ ${this.color('max', 'dim')} ${this.formatLatency(stats.max).padStart(10)} │`,
803
+ );
804
+ console.log(
805
+ ` │ ${this.color('p99', 'red')} ${this.formatLatency(stats.p99).padStart(10)} │ ${this.color('mean', 'dim')} ${this.formatLatency(stats.mean).padStart(10)} │`,
806
+ );
807
+ console.log(this.color(' └─────────────────────────────────────┘', 'dim'));
808
+
809
+ // Additional stats
810
+ console.log(
811
+ this.color(
812
+ ` σ ${stats.stdDev.toFixed(2)}ms | ${stats.iterations} samples | ${stats.failures} failures (${stats.failureRate}%)`,
813
+ 'dim',
814
+ ),
815
+ );
816
+
817
+ // Optional histogram
818
+ if (showHistogram && stats.timings.length > 0) {
819
+ console.log();
820
+ console.log(this.color(' Distribution:', 'dim'));
821
+ const histogramLines = generateHistogram(stats.timings, 8, 30);
822
+ for (const line of histogramLines) {
823
+ console.log(` ${this.color(line, 'dim')}`);
824
+ }
825
+ }
826
+ }
827
+
828
+ private formatLatency(ms: number): string {
829
+ if (ms < 1) {
830
+ return `${(ms * 1000).toFixed(0)}µs`;
831
+ }
832
+ if (ms < 1000) {
833
+ return `${ms.toFixed(1)}ms`;
834
+ }
835
+ return `${(ms / 1000).toFixed(2)}s`;
836
+ }
837
+
838
+ logProfileSummary(results: ProfileResult[]): void {
839
+ if (!this.shouldShowOutput()) {
840
+ return;
841
+ }
842
+
843
+ const totalIterations = results.reduce((sum, r) => sum + r.stats.iterations, 0);
844
+ const totalFailures = results.reduce((sum, r) => sum + r.stats.failures, 0);
845
+
846
+ console.log();
847
+ console.log(this.color('─'.repeat(50), 'dim'));
848
+ console.log(
849
+ `${this.color('⚡ Profile Summary:', 'magenta')} ${results.length} request${results.length === 1 ? '' : 's'}, ${totalIterations} total iterations`,
850
+ );
851
+
852
+ if (totalFailures > 0) {
853
+ console.log(this.color(` ${totalFailures} total failures`, 'yellow'));
854
+ }
855
+ }
735
856
  }
@@ -0,0 +1,161 @@
1
+ import { describe, expect, test } from 'bun:test';
2
+ import {
3
+ calculateMean,
4
+ calculatePercentile,
5
+ calculateProfileStats,
6
+ calculateStdDev,
7
+ exportToCSV,
8
+ exportToJSON,
9
+ generateHistogram,
10
+ } from './stats';
11
+
12
+ describe('calculatePercentile', () => {
13
+ test('returns 0 for empty array', () => {
14
+ expect(calculatePercentile([], 50)).toBe(0);
15
+ });
16
+
17
+ test('returns single value for array of 1', () => {
18
+ expect(calculatePercentile([100], 50)).toBe(100);
19
+ expect(calculatePercentile([100], 99)).toBe(100);
20
+ });
21
+
22
+ test('calculates p50 (median) correctly', () => {
23
+ expect(calculatePercentile([1, 2, 3, 4, 5], 50)).toBe(3);
24
+ expect(calculatePercentile([1, 2, 3, 4], 50)).toBe(2.5);
25
+ });
26
+
27
+ test('calculates p95 correctly', () => {
28
+ const values = Array.from({ length: 100 }, (_, i) => i + 1);
29
+ expect(calculatePercentile(values, 95)).toBeCloseTo(95.05, 1);
30
+ });
31
+
32
+ test('calculates p99 correctly', () => {
33
+ const values = Array.from({ length: 100 }, (_, i) => i + 1);
34
+ expect(calculatePercentile(values, 99)).toBeCloseTo(99.01, 1);
35
+ });
36
+
37
+ test('handles unsorted input (requires pre-sorting)', () => {
38
+ // Note: function expects sorted input
39
+ const sorted = [10, 20, 30, 40, 50].sort((a, b) => a - b);
40
+ expect(calculatePercentile(sorted, 50)).toBe(30);
41
+ });
42
+ });
43
+
44
+ describe('calculateMean', () => {
45
+ test('returns 0 for empty array', () => {
46
+ expect(calculateMean([])).toBe(0);
47
+ });
48
+
49
+ test('calculates mean correctly', () => {
50
+ expect(calculateMean([1, 2, 3, 4, 5])).toBe(3);
51
+ expect(calculateMean([10, 20, 30])).toBe(20);
52
+ expect(calculateMean([100])).toBe(100);
53
+ });
54
+ });
55
+
56
+ describe('calculateStdDev', () => {
57
+ test('returns 0 for empty array', () => {
58
+ expect(calculateStdDev([], 0)).toBe(0);
59
+ });
60
+
61
+ test('returns 0 for single value', () => {
62
+ expect(calculateStdDev([100], 100)).toBe(0);
63
+ });
64
+
65
+ test('calculates standard deviation correctly', () => {
66
+ const values = [2, 4, 4, 4, 5, 5, 7, 9];
67
+ const mean = calculateMean(values);
68
+ expect(calculateStdDev(values, mean)).toBeCloseTo(2, 0);
69
+ });
70
+ });
71
+
72
+ describe('calculateProfileStats', () => {
73
+ test('calculates stats correctly with no warmup', () => {
74
+ const timings = [10, 20, 30, 40, 50];
75
+ const stats = calculateProfileStats(timings, 0, 0);
76
+
77
+ expect(stats.iterations).toBe(5);
78
+ expect(stats.warmup).toBe(0);
79
+ expect(stats.min).toBe(10);
80
+ expect(stats.max).toBe(50);
81
+ expect(stats.mean).toBe(30);
82
+ expect(stats.failures).toBe(0);
83
+ expect(stats.failureRate).toBe(0);
84
+ });
85
+
86
+ test('excludes warmup iterations from stats', () => {
87
+ const timings = [100, 10, 20, 30, 40]; // First value is warmup outlier
88
+ const stats = calculateProfileStats(timings, 1, 0);
89
+
90
+ expect(stats.iterations).toBe(4);
91
+ expect(stats.warmup).toBe(1);
92
+ expect(stats.min).toBe(10);
93
+ expect(stats.max).toBe(40);
94
+ expect(stats.mean).toBe(25);
95
+ });
96
+
97
+ test('calculates failure rate correctly', () => {
98
+ const timings = [10, 20, 30];
99
+ const stats = calculateProfileStats(timings, 0, 2);
100
+
101
+ expect(stats.failures).toBe(2);
102
+ expect(stats.failureRate).toBeCloseTo(66.67, 1);
103
+ });
104
+
105
+ test('handles empty timings', () => {
106
+ const stats = calculateProfileStats([], 0, 0);
107
+
108
+ expect(stats.iterations).toBe(0);
109
+ expect(stats.min).toBe(0);
110
+ expect(stats.max).toBe(0);
111
+ expect(stats.mean).toBe(0);
112
+ });
113
+ });
114
+
115
+ describe('generateHistogram', () => {
116
+ test('returns "No data" for empty array', () => {
117
+ const result = generateHistogram([]);
118
+ expect(result).toEqual(['No data']);
119
+ });
120
+
121
+ test('generates histogram with correct bucket count', () => {
122
+ const timings = Array.from({ length: 100 }, (_, i) => i);
123
+ const result = generateHistogram(timings, 5, 20);
124
+
125
+ expect(result.length).toBe(5);
126
+ });
127
+
128
+ test('histogram lines contain bucket ranges', () => {
129
+ const timings = [10, 20, 30, 40, 50];
130
+ const result = generateHistogram(timings, 2, 10);
131
+
132
+ expect(result[0]).toContain('ms -');
133
+ expect(result[0]).toContain('ms │');
134
+ });
135
+ });
136
+
137
+ describe('exportToCSV', () => {
138
+ test('exports stats to CSV format', () => {
139
+ const stats = calculateProfileStats([10, 20, 30], 0, 0);
140
+ const csv = exportToCSV(stats, 'Test Request');
141
+
142
+ expect(csv).toContain('iteration,latency_ms');
143
+ expect(csv).toContain('1,10');
144
+ expect(csv).toContain('2,20');
145
+ expect(csv).toContain('3,30');
146
+ });
147
+ });
148
+
149
+ describe('exportToJSON', () => {
150
+ test('exports stats to JSON format', () => {
151
+ const stats = calculateProfileStats([10, 20, 30], 0, 0);
152
+ const json = exportToJSON(stats, 'Test Request');
153
+ const parsed = JSON.parse(json);
154
+
155
+ expect(parsed.request).toBe('Test Request');
156
+ expect(parsed.summary.iterations).toBe(3);
157
+ expect(parsed.summary.min).toBe(10);
158
+ expect(parsed.summary.max).toBe(30);
159
+ expect(parsed.timings).toEqual([10, 20, 30]);
160
+ });
161
+ });
@@ -0,0 +1,151 @@
1
+ import type { ProfileStats } from '../types/config';
2
+
3
+ /**
4
+ * Calculate percentile from sorted array.
5
+ * Uses linear interpolation for non-integer indices.
6
+ */
7
+ export function calculatePercentile(sorted: number[], percentile: number): number {
8
+ if (sorted.length === 0) {
9
+ return 0;
10
+ }
11
+ if (sorted.length === 1) {
12
+ return sorted[0];
13
+ }
14
+
15
+ const index = (percentile / 100) * (sorted.length - 1);
16
+ const lower = Math.floor(index);
17
+ const upper = Math.ceil(index);
18
+ const fraction = index - lower;
19
+
20
+ if (lower === upper) {
21
+ return sorted[lower];
22
+ }
23
+ return sorted[lower] * (1 - fraction) + sorted[upper] * fraction;
24
+ }
25
+
26
+ /**
27
+ * Calculate arithmetic mean.
28
+ */
29
+ export function calculateMean(values: number[]): number {
30
+ if (values.length === 0) {
31
+ return 0;
32
+ }
33
+ return values.reduce((sum, v) => sum + v, 0) / values.length;
34
+ }
35
+
36
+ /**
37
+ * Calculate standard deviation.
38
+ */
39
+ export function calculateStdDev(values: number[], mean: number): number {
40
+ if (values.length <= 1) {
41
+ return 0;
42
+ }
43
+ const squaredDiffs = values.map((v) => (v - mean) ** 2);
44
+ const variance = squaredDiffs.reduce((sum, v) => sum + v, 0) / values.length;
45
+ return Math.sqrt(variance);
46
+ }
47
+
48
+ /**
49
+ * Calculate profile statistics from raw timings.
50
+ */
51
+ export function calculateProfileStats(
52
+ timings: number[],
53
+ warmup: number,
54
+ failures: number,
55
+ ): ProfileStats {
56
+ // Exclude warmup iterations
57
+ const effectiveTimings = timings.slice(warmup);
58
+ const sorted = [...effectiveTimings].sort((a, b) => a - b);
59
+
60
+ const mean = calculateMean(sorted);
61
+ const totalIterations = timings.length;
62
+ const effectiveIterations = effectiveTimings.length;
63
+
64
+ return {
65
+ iterations: effectiveIterations,
66
+ warmup,
67
+ min: sorted.length > 0 ? sorted[0] : 0,
68
+ max: sorted.length > 0 ? sorted[sorted.length - 1] : 0,
69
+ mean: Math.round(mean * 100) / 100,
70
+ median: Math.round(calculatePercentile(sorted, 50) * 100) / 100,
71
+ p50: Math.round(calculatePercentile(sorted, 50) * 100) / 100,
72
+ p95: Math.round(calculatePercentile(sorted, 95) * 100) / 100,
73
+ p99: Math.round(calculatePercentile(sorted, 99) * 100) / 100,
74
+ stdDev: Math.round(calculateStdDev(sorted, mean) * 100) / 100,
75
+ failures,
76
+ failureRate: totalIterations > 0 ? Math.round((failures / totalIterations) * 10000) / 100 : 0,
77
+ timings: effectiveTimings,
78
+ };
79
+ }
80
+
81
+ /**
82
+ * Generate ASCII histogram for latency distribution.
83
+ */
84
+ export function generateHistogram(timings: number[], buckets = 10, width = 40): string[] {
85
+ if (timings.length === 0) {
86
+ return ['No data'];
87
+ }
88
+
89
+ const min = Math.min(...timings);
90
+ const max = Math.max(...timings);
91
+ const range = max - min || 1;
92
+ const bucketSize = range / buckets;
93
+
94
+ // Count values per bucket
95
+ const counts = new Array(buckets).fill(0);
96
+ for (const t of timings) {
97
+ const bucket = Math.min(Math.floor((t - min) / bucketSize), buckets - 1);
98
+ counts[bucket]++;
99
+ }
100
+
101
+ const maxCount = Math.max(...counts);
102
+ const lines: string[] = [];
103
+
104
+ for (let i = 0; i < buckets; i++) {
105
+ const bucketMin = min + i * bucketSize;
106
+ const bucketMax = min + (i + 1) * bucketSize;
107
+ const barLength = maxCount > 0 ? Math.round((counts[i] / maxCount) * width) : 0;
108
+ const bar = '█'.repeat(barLength);
109
+ const label = `${bucketMin.toFixed(0).padStart(6)}ms - ${bucketMax.toFixed(0).padStart(6)}ms`;
110
+ lines.push(`${label} │${bar} ${counts[i]}`);
111
+ }
112
+
113
+ return lines;
114
+ }
115
+
116
+ /**
117
+ * Export stats to CSV format.
118
+ */
119
+ export function exportToCSV(stats: ProfileStats, _requestName: string): string {
120
+ const headers = ['iteration', 'latency_ms'];
121
+ const rows = stats.timings.map((t, i) => `${i + 1},${t}`);
122
+ return [headers.join(','), ...rows].join('\n');
123
+ }
124
+
125
+ /**
126
+ * Export stats to JSON format.
127
+ */
128
+ export function exportToJSON(stats: ProfileStats, requestName: string): string {
129
+ return JSON.stringify(
130
+ {
131
+ request: requestName,
132
+ summary: {
133
+ iterations: stats.iterations,
134
+ warmup: stats.warmup,
135
+ failures: stats.failures,
136
+ failureRate: stats.failureRate,
137
+ min: stats.min,
138
+ max: stats.max,
139
+ mean: stats.mean,
140
+ median: stats.median,
141
+ p50: stats.p50,
142
+ p95: stats.p95,
143
+ p99: stats.p99,
144
+ stdDev: stats.stdDev,
145
+ },
146
+ timings: stats.timings,
147
+ },
148
+ null,
149
+ 2,
150
+ );
151
+ }