@curl-runner/cli 1.13.0 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/cli.ts +423 -1
- package/src/diff/baseline-manager.test.ts +181 -0
- package/src/diff/baseline-manager.ts +266 -0
- package/src/diff/diff-formatter.ts +316 -0
- package/src/diff/index.ts +3 -0
- package/src/diff/response-differ.test.ts +330 -0
- package/src/diff/response-differ.ts +489 -0
- package/src/executor/profile-executor.test.ts +132 -0
- package/src/executor/profile-executor.ts +167 -0
- package/src/types/config.ts +166 -0
- package/src/utils/logger.ts +121 -0
- package/src/utils/stats.test.ts +161 -0
- package/src/utils/stats.ts +151 -0
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
ExecutionResult,
|
|
3
|
+
GlobalConfig,
|
|
4
|
+
ProfileConfig,
|
|
5
|
+
ProfileResult,
|
|
6
|
+
RequestConfig,
|
|
7
|
+
} from '../types/config';
|
|
8
|
+
import { CurlBuilder } from '../utils/curl-builder';
|
|
9
|
+
import { Logger } from '../utils/logger';
|
|
10
|
+
import { calculateProfileStats } from '../utils/stats';
|
|
11
|
+
|
|
12
|
+
export class ProfileExecutor {
|
|
13
|
+
private logger: Logger;
|
|
14
|
+
private profileConfig: ProfileConfig;
|
|
15
|
+
|
|
16
|
+
constructor(globalConfig: GlobalConfig, profileConfig: ProfileConfig) {
|
|
17
|
+
this.profileConfig = profileConfig;
|
|
18
|
+
this.logger = new Logger(globalConfig.output);
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Execute a single iteration of the request (minimal overhead version).
|
|
23
|
+
* Skips logging and validation for accurate timing.
|
|
24
|
+
*/
|
|
25
|
+
private async executeSingleIteration(config: RequestConfig): Promise<ExecutionResult> {
|
|
26
|
+
const startTime = performance.now();
|
|
27
|
+
const command = CurlBuilder.buildCommand(config);
|
|
28
|
+
const result = await CurlBuilder.executeCurl(command);
|
|
29
|
+
|
|
30
|
+
if (result.success) {
|
|
31
|
+
let body = result.body;
|
|
32
|
+
try {
|
|
33
|
+
if (
|
|
34
|
+
result.headers?.['content-type']?.includes('application/json') ||
|
|
35
|
+
(body && (body.trim().startsWith('{') || body.trim().startsWith('[')))
|
|
36
|
+
) {
|
|
37
|
+
body = JSON.parse(body);
|
|
38
|
+
}
|
|
39
|
+
} catch (_e) {
|
|
40
|
+
// Keep raw body
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
return {
|
|
44
|
+
request: config,
|
|
45
|
+
success: true,
|
|
46
|
+
status: result.status,
|
|
47
|
+
headers: result.headers,
|
|
48
|
+
body,
|
|
49
|
+
metrics: {
|
|
50
|
+
...result.metrics,
|
|
51
|
+
duration: performance.now() - startTime,
|
|
52
|
+
},
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
return {
|
|
57
|
+
request: config,
|
|
58
|
+
success: false,
|
|
59
|
+
error: result.error,
|
|
60
|
+
metrics: {
|
|
61
|
+
duration: performance.now() - startTime,
|
|
62
|
+
},
|
|
63
|
+
};
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
/**
|
|
67
|
+
* Execute iterations in chunks for controlled concurrency.
|
|
68
|
+
*/
|
|
69
|
+
private async executeWithConcurrency(
|
|
70
|
+
config: RequestConfig,
|
|
71
|
+
iterations: number,
|
|
72
|
+
concurrency: number,
|
|
73
|
+
): Promise<ExecutionResult[]> {
|
|
74
|
+
const results: ExecutionResult[] = [];
|
|
75
|
+
|
|
76
|
+
for (let i = 0; i < iterations; i += concurrency) {
|
|
77
|
+
const chunkSize = Math.min(concurrency, iterations - i);
|
|
78
|
+
const chunk = await Promise.all(
|
|
79
|
+
Array.from({ length: chunkSize }, () => this.executeSingleIteration(config)),
|
|
80
|
+
);
|
|
81
|
+
results.push(...chunk);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
return results;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
/**
|
|
88
|
+
* Profile a single request by running it multiple times.
|
|
89
|
+
*/
|
|
90
|
+
async profileRequest(config: RequestConfig, index = 0): Promise<ProfileResult> {
|
|
91
|
+
const iterations = this.profileConfig.iterations;
|
|
92
|
+
const warmup = this.profileConfig.warmup ?? 1;
|
|
93
|
+
const concurrency = this.profileConfig.concurrency ?? 1;
|
|
94
|
+
const requestName = config.name || `Request ${index + 1}`;
|
|
95
|
+
|
|
96
|
+
this.logger.logProfileStart(requestName, iterations, warmup, concurrency);
|
|
97
|
+
|
|
98
|
+
const results =
|
|
99
|
+
concurrency === 1
|
|
100
|
+
? await this.executeSequentially(config, iterations)
|
|
101
|
+
: await this.executeWithConcurrency(config, iterations, concurrency);
|
|
102
|
+
|
|
103
|
+
// Collect timings and count failures
|
|
104
|
+
const timings: number[] = [];
|
|
105
|
+
let failures = 0;
|
|
106
|
+
|
|
107
|
+
for (const result of results) {
|
|
108
|
+
if (result.success && result.metrics?.duration !== undefined) {
|
|
109
|
+
timings.push(result.metrics.duration);
|
|
110
|
+
} else {
|
|
111
|
+
failures++;
|
|
112
|
+
// Use 0 as placeholder for failed requests (excluded from stats)
|
|
113
|
+
timings.push(0);
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
// Filter out failed timings (0s) for stats calculation
|
|
118
|
+
const successfulTimings = timings
|
|
119
|
+
.map((t, i) => (results[i].success ? t : -1))
|
|
120
|
+
.filter((t) => t >= 0);
|
|
121
|
+
|
|
122
|
+
// Recalculate stats with only successful timings
|
|
123
|
+
const stats = calculateProfileStats(
|
|
124
|
+
successfulTimings,
|
|
125
|
+
Math.min(warmup, successfulTimings.length),
|
|
126
|
+
failures,
|
|
127
|
+
);
|
|
128
|
+
|
|
129
|
+
return {
|
|
130
|
+
request: config,
|
|
131
|
+
stats,
|
|
132
|
+
iterations: results,
|
|
133
|
+
};
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
/**
|
|
137
|
+
* Execute iterations sequentially (default behavior).
|
|
138
|
+
*/
|
|
139
|
+
private async executeSequentially(
|
|
140
|
+
config: RequestConfig,
|
|
141
|
+
iterations: number,
|
|
142
|
+
): Promise<ExecutionResult[]> {
|
|
143
|
+
const results: ExecutionResult[] = [];
|
|
144
|
+
|
|
145
|
+
for (let i = 0; i < iterations; i++) {
|
|
146
|
+
const result = await this.executeSingleIteration(config);
|
|
147
|
+
results.push(result);
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
return results;
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
/**
|
|
154
|
+
* Profile multiple requests.
|
|
155
|
+
*/
|
|
156
|
+
async profileRequests(requests: RequestConfig[]): Promise<ProfileResult[]> {
|
|
157
|
+
const results: ProfileResult[] = [];
|
|
158
|
+
|
|
159
|
+
for (let i = 0; i < requests.length; i++) {
|
|
160
|
+
const result = await this.profileRequest(requests[i], i);
|
|
161
|
+
results.push(result);
|
|
162
|
+
this.logger.logProfileResult(result, this.profileConfig.histogram ?? false);
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
return results;
|
|
166
|
+
}
|
|
167
|
+
}
|
package/src/types/config.ts
CHANGED
|
@@ -257,6 +257,11 @@ export interface RequestConfig {
|
|
|
257
257
|
* Use `true` to enable with defaults, or provide detailed config.
|
|
258
258
|
*/
|
|
259
259
|
snapshot?: SnapshotConfig | boolean;
|
|
260
|
+
/**
|
|
261
|
+
* Response diffing configuration for this request.
|
|
262
|
+
* Use `true` to enable with defaults, or provide detailed config.
|
|
263
|
+
*/
|
|
264
|
+
diff?: DiffConfig | boolean;
|
|
260
265
|
sourceOutputConfig?: {
|
|
261
266
|
verbose?: boolean;
|
|
262
267
|
showHeaders?: boolean;
|
|
@@ -325,11 +330,21 @@ export interface GlobalConfig {
|
|
|
325
330
|
* Automatically re-runs requests when YAML files change.
|
|
326
331
|
*/
|
|
327
332
|
watch?: WatchConfig;
|
|
333
|
+
/**
|
|
334
|
+
* Performance profiling mode configuration.
|
|
335
|
+
* Runs requests multiple times to collect p50/p95/p99 latency stats.
|
|
336
|
+
*/
|
|
337
|
+
profile?: ProfileConfig;
|
|
328
338
|
/**
|
|
329
339
|
* Snapshot testing configuration.
|
|
330
340
|
* Saves response snapshots and compares future runs against them.
|
|
331
341
|
*/
|
|
332
342
|
snapshot?: GlobalSnapshotConfig;
|
|
343
|
+
/**
|
|
344
|
+
* Response diffing configuration.
|
|
345
|
+
* Compare responses between environments or runs to detect API drift.
|
|
346
|
+
*/
|
|
347
|
+
diff?: GlobalDiffConfig;
|
|
333
348
|
variables?: Record<string, string>;
|
|
334
349
|
output?: {
|
|
335
350
|
verbose?: boolean;
|
|
@@ -369,6 +384,8 @@ export interface ExecutionResult {
|
|
|
369
384
|
};
|
|
370
385
|
/** Snapshot comparison result (if snapshot testing enabled). */
|
|
371
386
|
snapshotResult?: SnapshotCompareResult;
|
|
387
|
+
/** Diff comparison result (if response diffing enabled). */
|
|
388
|
+
diffResult?: DiffCompareResult;
|
|
372
389
|
/** Whether this request was skipped due to a `when` condition. */
|
|
373
390
|
skipped?: boolean;
|
|
374
391
|
/** Reason the request was skipped (condition that failed). */
|
|
@@ -403,6 +420,65 @@ export interface WatchConfig {
|
|
|
403
420
|
clear?: boolean;
|
|
404
421
|
}
|
|
405
422
|
|
|
423
|
+
/**
 * Configuration for performance profiling mode.
 * Runs requests multiple times to collect latency statistics.
 */
export interface ProfileConfig {
  /** Number of iterations to run. Default: 10 */
  iterations: number;
  /** Number of warmup iterations to exclude from stats. Default: 1 */
  warmup?: number;
  /** Number of concurrent iterations. Default: 1 (sequential) */
  concurrency?: number;
  /** Show ASCII histogram in output. Default: false */
  histogram?: boolean;
  /** Export raw timings to file (JSON or CSV based on extension) */
  exportFile?: string;
}

/**
 * Statistics computed from profile run timings.
 * All latency values are in milliseconds.
 */
export interface ProfileStats {
  /** Total iterations run (excluding warmup) */
  iterations: number;
  /** Warmup iterations excluded */
  warmup: number;
  /** Minimum latency in ms */
  min: number;
  /** Maximum latency in ms */
  max: number;
  /** Mean latency in ms */
  mean: number;
  /** Median latency in ms (same as p50) */
  median: number;
  /** 50th percentile latency in ms */
  p50: number;
  /** 95th percentile latency in ms */
  p95: number;
  /** 99th percentile latency in ms */
  p99: number;
  /** Standard deviation in ms */
  stdDev: number;
  /** Number of failed iterations */
  failures: number;
  /** Failure rate as percentage */
  failureRate: number;
  /** Raw timing values (for export) */
  timings: number[];
}

/**
 * Result of a profiled request execution.
 */
export interface ProfileResult {
  /** The request that was profiled */
  request: RequestConfig;
  /** Aggregate latency statistics for the run */
  stats: ProfileStats;
  /** Individual results from each iteration */
  iterations: ExecutionResult[];
}
|
|
481
|
+
|
|
406
482
|
/**
|
|
407
483
|
* Configuration for snapshot testing.
|
|
408
484
|
* Snapshots save response data and compare future runs against them.
|
|
@@ -470,3 +546,93 @@ export interface SnapshotCompareResult {
|
|
|
470
546
|
updated: boolean;
|
|
471
547
|
differences: SnapshotDiff[];
|
|
472
548
|
}
|
|
549
|
+
|
|
550
|
+
/**
 * Configuration for response diffing at request level.
 */
export interface DiffConfig {
  /** Enable diffing for this request. */
  enabled?: boolean;
  /** Paths to exclude from comparison (e.g., 'body.timestamp'). */
  exclude?: string[];
  /** Match rules for dynamic values (path -> '*' or 'regex:pattern'). */
  match?: Record<string, string>;
  /** Include timing differences in comparison. Default: false */
  includeTimings?: boolean;
}

/**
 * Global configuration for response diffing.
 * Extends the per-request options with baseline storage/labeling settings.
 */
export interface GlobalDiffConfig extends DiffConfig {
  /** Directory for baseline files. Default: '__baselines__' */
  dir?: string;
  /** Label for current run (e.g., 'staging', 'production'). */
  label?: string;
  /** Label to compare against. */
  compareWith?: string;
  /** Save current run as baseline. */
  save?: boolean;
  /** Output format for diff results. Default: 'terminal' */
  outputFormat?: 'terminal' | 'json' | 'markdown';
}

/**
 * Stored baseline data for a single request.
 */
export interface Baseline {
  /** HTTP status code of the captured response. */
  status?: number;
  /** Captured response headers. */
  headers?: Record<string, string>;
  /** Captured response body. */
  body?: JsonValue;
  /** Captured response timing in ms — NOTE(review): units assumed from ProfileStats convention; confirm in baseline-manager. */
  timing?: number;
  /** Content hash of the captured response — presumably used for fast change detection; verify in baseline-manager. */
  hash: string;
  /** ISO timestamp of when this baseline was captured — format assumed; confirm against writer. */
  capturedAt: string;
}

/**
 * Baseline file format (the on-disk container for captured baselines).
 */
export interface BaselineFile {
  /** Schema version of the baseline file. */
  version: number;
  /** Label this file was captured under (e.g., 'staging'). */
  label: string;
  /** Timestamp the file was written. */
  capturedAt: string;
  /** Baselines keyed by request — key scheme defined by baseline-manager; confirm there. */
  baselines: Record<string, Baseline>;
}

/**
 * Single difference in response comparison.
 */
export interface ResponseDiff {
  /** Dot-path to the differing value (e.g., 'body.user.id'). */
  path: string;
  /** Value from the stored baseline. */
  baseline: unknown;
  /** Value from the current response. */
  current: unknown;
  /** Kind of difference detected. */
  type: 'added' | 'removed' | 'changed' | 'type_mismatch';
}

/**
 * Result of comparing a response against a baseline.
 */
export interface DiffCompareResult {
  /** Display name of the compared request. */
  requestName: string;
  /** True when any differences were found. */
  hasDifferences: boolean;
  /** True when no prior baseline existed and this run created one. */
  isNewBaseline: boolean;
  /** Label of the baseline side of the comparison. */
  baselineLabel: string;
  /** Label of the current side of the comparison. */
  currentLabel: string;
  /** All detected differences. */
  differences: ResponseDiff[];
  /** Timing comparison, present only when timing diffing is enabled. */
  timingDiff?: {
    baseline: number;
    current: number;
    changePercent: number;
  };
}

/**
 * Summary of diff comparison across all requests.
 */
export interface DiffSummary {
  totalRequests: number;
  unchanged: number;
  changed: number;
  newBaselines: number;
  results: DiffCompareResult[];
}
|
package/src/utils/logger.ts
CHANGED
|
@@ -3,8 +3,10 @@ import type {
|
|
|
3
3
|
ExecutionResult,
|
|
4
4
|
ExecutionSummary,
|
|
5
5
|
GlobalConfig,
|
|
6
|
+
ProfileResult,
|
|
6
7
|
RequestConfig,
|
|
7
8
|
} from '../types/config';
|
|
9
|
+
import { generateHistogram } from './stats';
|
|
8
10
|
|
|
9
11
|
interface TreeNode {
|
|
10
12
|
label: string;
|
|
@@ -732,4 +734,123 @@ export class Logger {
|
|
|
732
734
|
);
|
|
733
735
|
console.log();
|
|
734
736
|
}
|
|
737
|
+
|
|
738
|
+
logProfileStart(
|
|
739
|
+
requestName: string,
|
|
740
|
+
iterations: number,
|
|
741
|
+
warmup: number,
|
|
742
|
+
concurrency: number,
|
|
743
|
+
): void {
|
|
744
|
+
if (!this.shouldShowOutput()) {
|
|
745
|
+
return;
|
|
746
|
+
}
|
|
747
|
+
|
|
748
|
+
console.log();
|
|
749
|
+
console.log(`${this.color('⚡ PROFILING', 'magenta')} ${this.color(requestName, 'bright')}`);
|
|
750
|
+
console.log(
|
|
751
|
+
this.color(
|
|
752
|
+
` ${iterations} iterations, ${warmup} warmup, concurrency: ${concurrency}`,
|
|
753
|
+
'dim',
|
|
754
|
+
),
|
|
755
|
+
);
|
|
756
|
+
}
|
|
757
|
+
|
|
758
|
+
  /**
   * Print the stats for one profiled request in the configured output format:
   * - 'json': a single JSON line with request info and the summary stats
   * - 'raw': tab-separated p50/p95/p99/mean only
   * - otherwise: a pretty latency table, plus an optional ASCII histogram
   */
  logProfileResult(result: ProfileResult, showHistogram: boolean): void {
    const { stats, request } = result;
    const name = request.name || request.url;

    if (this.config.format === 'json') {
      console.log(
        JSON.stringify({
          request: { name, url: request.url, method: request.method || 'GET' },
          stats: {
            iterations: stats.iterations,
            warmup: stats.warmup,
            failures: stats.failures,
            failureRate: stats.failureRate,
            min: stats.min,
            max: stats.max,
            mean: stats.mean,
            median: stats.median,
            p50: stats.p50,
            p95: stats.p95,
            p99: stats.p99,
            stdDev: stats.stdDev,
          },
        }),
      );
      return;
    }

    if (this.config.format === 'raw') {
      // Raw format: just print the key stats
      console.log(`${stats.p50}\t${stats.p95}\t${stats.p99}\t${stats.mean}`);
      return;
    }

    // Pretty format
    console.log();
    // Warn icon as soon as any iteration failed.
    const statusIcon = stats.failures === 0 ? this.color('✓', 'green') : this.color('⚠', 'yellow');
    console.log(`${statusIcon} ${this.color(name, 'bright')}`);

    // Latency stats table
    console.log(this.color(' ┌─────────────────────────────────────┐', 'dim'));
    console.log(
      ` │ ${this.color('p50', 'cyan')} ${this.formatLatency(stats.p50).padStart(10)} │ ${this.color('min', 'dim')} ${this.formatLatency(stats.min).padStart(10)} │`,
    );
    console.log(
      ` │ ${this.color('p95', 'yellow')} ${this.formatLatency(stats.p95).padStart(10)} │ ${this.color('max', 'dim')} ${this.formatLatency(stats.max).padStart(10)} │`,
    );
    console.log(
      ` │ ${this.color('p99', 'red')} ${this.formatLatency(stats.p99).padStart(10)} │ ${this.color('mean', 'dim')} ${this.formatLatency(stats.mean).padStart(10)} │`,
    );
    console.log(this.color(' └─────────────────────────────────────┘', 'dim'));

    // Additional stats
    console.log(
      this.color(
        ` σ ${stats.stdDev.toFixed(2)}ms | ${stats.iterations} samples | ${stats.failures} failures (${stats.failureRate}%)`,
        'dim',
      ),
    );

    // Optional histogram
    if (showHistogram && stats.timings.length > 0) {
      console.log();
      console.log(this.color(' Distribution:', 'dim'));
      const histogramLines = generateHistogram(stats.timings, 8, 30);
      for (const line of histogramLines) {
        console.log(` ${this.color(line, 'dim')}`);
      }
    }
  }
|
|
827
|
+
|
|
828
|
+
private formatLatency(ms: number): string {
|
|
829
|
+
if (ms < 1) {
|
|
830
|
+
return `${(ms * 1000).toFixed(0)}µs`;
|
|
831
|
+
}
|
|
832
|
+
if (ms < 1000) {
|
|
833
|
+
return `${ms.toFixed(1)}ms`;
|
|
834
|
+
}
|
|
835
|
+
return `${(ms / 1000).toFixed(2)}s`;
|
|
836
|
+
}
|
|
837
|
+
|
|
838
|
+
logProfileSummary(results: ProfileResult[]): void {
|
|
839
|
+
if (!this.shouldShowOutput()) {
|
|
840
|
+
return;
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
const totalIterations = results.reduce((sum, r) => sum + r.stats.iterations, 0);
|
|
844
|
+
const totalFailures = results.reduce((sum, r) => sum + r.stats.failures, 0);
|
|
845
|
+
|
|
846
|
+
console.log();
|
|
847
|
+
console.log(this.color('─'.repeat(50), 'dim'));
|
|
848
|
+
console.log(
|
|
849
|
+
`${this.color('⚡ Profile Summary:', 'magenta')} ${results.length} request${results.length === 1 ? '' : 's'}, ${totalIterations} total iterations`,
|
|
850
|
+
);
|
|
851
|
+
|
|
852
|
+
if (totalFailures > 0) {
|
|
853
|
+
console.log(this.color(` ${totalFailures} total failures`, 'yellow'));
|
|
854
|
+
}
|
|
855
|
+
}
|
|
735
856
|
}
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
import { describe, expect, test } from 'bun:test';
|
|
2
|
+
import {
|
|
3
|
+
calculateMean,
|
|
4
|
+
calculatePercentile,
|
|
5
|
+
calculateProfileStats,
|
|
6
|
+
calculateStdDev,
|
|
7
|
+
exportToCSV,
|
|
8
|
+
exportToJSON,
|
|
9
|
+
generateHistogram,
|
|
10
|
+
} from './stats';
|
|
11
|
+
|
|
12
|
+
// Unit tests for the profiling stats helpers (run under `bun test`).
describe('calculatePercentile', () => {
  test('returns 0 for empty array', () => {
    expect(calculatePercentile([], 50)).toBe(0);
  });

  test('returns single value for array of 1', () => {
    expect(calculatePercentile([100], 50)).toBe(100);
    expect(calculatePercentile([100], 99)).toBe(100);
  });

  test('calculates p50 (median) correctly', () => {
    expect(calculatePercentile([1, 2, 3, 4, 5], 50)).toBe(3);
    // Even-length input: median interpolates between the two middle values.
    expect(calculatePercentile([1, 2, 3, 4], 50)).toBe(2.5);
  });

  test('calculates p95 correctly', () => {
    const values = Array.from({ length: 100 }, (_, i) => i + 1);
    expect(calculatePercentile(values, 95)).toBeCloseTo(95.05, 1);
  });

  test('calculates p99 correctly', () => {
    const values = Array.from({ length: 100 }, (_, i) => i + 1);
    expect(calculatePercentile(values, 99)).toBeCloseTo(99.01, 1);
  });

  test('handles unsorted input (requires pre-sorting)', () => {
    // Note: function expects sorted input
    const sorted = [10, 20, 30, 40, 50].sort((a, b) => a - b);
    expect(calculatePercentile(sorted, 50)).toBe(30);
  });
});

describe('calculateMean', () => {
  test('returns 0 for empty array', () => {
    expect(calculateMean([])).toBe(0);
  });

  test('calculates mean correctly', () => {
    expect(calculateMean([1, 2, 3, 4, 5])).toBe(3);
    expect(calculateMean([10, 20, 30])).toBe(20);
    expect(calculateMean([100])).toBe(100);
  });
});

describe('calculateStdDev', () => {
  test('returns 0 for empty array', () => {
    expect(calculateStdDev([], 0)).toBe(0);
  });

  test('returns 0 for single value', () => {
    expect(calculateStdDev([100], 100)).toBe(0);
  });

  test('calculates standard deviation correctly', () => {
    // Classic textbook sample: stddev of this set is 2.
    const values = [2, 4, 4, 4, 5, 5, 7, 9];
    const mean = calculateMean(values);
    expect(calculateStdDev(values, mean)).toBeCloseTo(2, 0);
  });
});

describe('calculateProfileStats', () => {
  test('calculates stats correctly with no warmup', () => {
    const timings = [10, 20, 30, 40, 50];
    const stats = calculateProfileStats(timings, 0, 0);

    expect(stats.iterations).toBe(5);
    expect(stats.warmup).toBe(0);
    expect(stats.min).toBe(10);
    expect(stats.max).toBe(50);
    expect(stats.mean).toBe(30);
    expect(stats.failures).toBe(0);
    expect(stats.failureRate).toBe(0);
  });

  test('excludes warmup iterations from stats', () => {
    const timings = [100, 10, 20, 30, 40]; // First value is warmup outlier
    const stats = calculateProfileStats(timings, 1, 0);

    expect(stats.iterations).toBe(4);
    expect(stats.warmup).toBe(1);
    expect(stats.min).toBe(10);
    expect(stats.max).toBe(40);
    expect(stats.mean).toBe(25);
  });

  test('calculates failure rate correctly', () => {
    // 3 successes + 2 failures => 2/3 of successful count? No:
    // failureRate is relative to total attempts (3 timings + 2 failures)... 
    // NOTE(review): expected 66.67 implies rate = failures / timings.length; confirm in stats.ts.
    const timings = [10, 20, 30];
    const stats = calculateProfileStats(timings, 0, 2);

    expect(stats.failures).toBe(2);
    expect(stats.failureRate).toBeCloseTo(66.67, 1);
  });

  test('handles empty timings', () => {
    const stats = calculateProfileStats([], 0, 0);

    expect(stats.iterations).toBe(0);
    expect(stats.min).toBe(0);
    expect(stats.max).toBe(0);
    expect(stats.mean).toBe(0);
  });
});

describe('generateHistogram', () => {
  test('returns "No data" for empty array', () => {
    const result = generateHistogram([]);
    expect(result).toEqual(['No data']);
  });

  test('generates histogram with correct bucket count', () => {
    const timings = Array.from({ length: 100 }, (_, i) => i);
    const result = generateHistogram(timings, 5, 20);

    expect(result.length).toBe(5);
  });

  test('histogram lines contain bucket ranges', () => {
    const timings = [10, 20, 30, 40, 50];
    const result = generateHistogram(timings, 2, 10);

    // Each line looks like "<lo>ms - <hi>ms │<bar>".
    expect(result[0]).toContain('ms -');
    expect(result[0]).toContain('ms │');
  });
});

describe('exportToCSV', () => {
  test('exports stats to CSV format', () => {
    const stats = calculateProfileStats([10, 20, 30], 0, 0);
    const csv = exportToCSV(stats, 'Test Request');

    expect(csv).toContain('iteration,latency_ms');
    expect(csv).toContain('1,10');
    expect(csv).toContain('2,20');
    expect(csv).toContain('3,30');
  });
});

describe('exportToJSON', () => {
  test('exports stats to JSON format', () => {
    const stats = calculateProfileStats([10, 20, 30], 0, 0);
    const json = exportToJSON(stats, 'Test Request');
    const parsed = JSON.parse(json);

    expect(parsed.request).toBe('Test Request');
    expect(parsed.summary.iterations).toBe(3);
    expect(parsed.summary.min).toBe(10);
    expect(parsed.summary.max).toBe(30);
    expect(parsed.timings).toEqual([10, 20, 30]);
  });
});
|