benchforge 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/HeapSampler-BX3de22o.mjs +54 -0
- package/dist/HeapSampler-BX3de22o.mjs.map +1 -0
- package/dist/TimingUtils-D4z1jpp2.mjs +599 -0
- package/dist/TimingUtils-D4z1jpp2.mjs.map +1 -0
- package/dist/bin/benchforge.mjs +1 -1
- package/dist/index.mjs +2 -1
- package/dist/runners/WorkerScript.mjs +158 -0
- package/dist/runners/WorkerScript.mjs.map +1 -0
- package/dist/{src-CGuaC3Wo.mjs → src-cYpHvc40.mjs} +14 -601
- package/dist/src-cYpHvc40.mjs.map +1 -0
- package/package.json +1 -1
- package/src/runners/RunnerOrchestrator.ts +11 -7
- package/dist/src-CGuaC3Wo.mjs.map +0 -1
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import { Session } from "node:inspector/promises";
|
|
2
|
+
|
|
3
|
+
//#region src/heap-sample/HeapSampler.ts
|
|
4
|
+
const defaultOptions = {
|
|
5
|
+
samplingInterval: 32768,
|
|
6
|
+
stackDepth: 64,
|
|
7
|
+
includeMinorGC: true,
|
|
8
|
+
includeMajorGC: true
|
|
9
|
+
};
|
|
10
|
+
/**
 * Run `fn` while heap-allocation sampling is active and return both its
 * result and the captured allocation profile. The inspector session is
 * always disconnected, even when `fn` throws.
 */
async function withHeapSampling(options, fn) {
  const opts = Object.assign({}, defaultOptions, options);
  const session = new Session();
  session.connect();
  try {
    await startSampling(session, opts);
    const result = await fn();
    const profile = await stopSampling(session);
    return { result, profile };
  } finally {
    session.disconnect();
  }
}
|
|
28
|
+
/**
 * Begin heap sampling on the session. First attempts the extended parameter
 * set (include-objects-collected-by-GC flags); if the runtime rejects it,
 * warns and retries with only samplingInterval/stackDepth.
 */
async function startSampling(session, opts) {
  const base = {
    samplingInterval: opts.samplingInterval,
    stackDepth: opts.stackDepth
  };
  try {
    await session.post("HeapProfiler.startSampling", {
      ...base,
      includeObjectsCollectedByMinorGC: opts.includeMinorGC,
      includeObjectsCollectedByMajorGC: opts.includeMajorGC
    });
  } catch {
    console.warn("HeapProfiler: include-collected params not supported, falling back");
    await session.post("HeapProfiler.startSampling", base);
  }
}
|
|
47
|
+
/** Stop sampling on the session and return the collected heap profile. */
async function stopSampling(session) {
  const response = await session.post("HeapProfiler.stopSampling");
  return response.profile;
}
|
|
51
|
+
|
|
52
|
+
//#endregion
|
|
53
|
+
export { withHeapSampling };
|
|
54
|
+
//# sourceMappingURL=HeapSampler-BX3de22o.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"HeapSampler-BX3de22o.mjs","names":[],"sources":["../src/heap-sample/HeapSampler.ts"],"sourcesContent":["import { Session } from \"node:inspector/promises\";\n\nexport interface HeapSampleOptions {\n samplingInterval?: number; // bytes between samples, default 32768\n stackDepth?: number; // max stack frames, default 64\n includeMinorGC?: boolean; // keep objects collected by minor GC, default true\n includeMajorGC?: boolean; // keep objects collected by major GC, default true\n}\n\nexport interface ProfileNode {\n callFrame: {\n functionName: string;\n url: string;\n lineNumber: number;\n columnNumber?: number;\n };\n selfSize: number;\n children?: ProfileNode[];\n}\n\nexport interface HeapProfile {\n head: ProfileNode;\n samples?: number[]; // sample IDs (length = number of samples taken)\n}\n\nconst defaultOptions: Required<HeapSampleOptions> = {\n samplingInterval: 32768,\n stackDepth: 64,\n includeMinorGC: true,\n includeMajorGC: true,\n};\n\n/** Run a function while sampling heap allocations, return profile */\nexport async function withHeapSampling<T>(\n options: HeapSampleOptions,\n fn: () => Promise<T> | T,\n): Promise<{ result: T; profile: HeapProfile }> {\n const opts = { ...defaultOptions, ...options };\n const session = new Session();\n session.connect();\n\n try {\n await startSampling(session, opts);\n const result = await fn();\n const profile = await stopSampling(session);\n return { result, profile };\n } finally {\n session.disconnect();\n }\n}\n\n/** Start heap sampling, falling back if include-collected params aren't supported */\nasync function startSampling(\n session: Session,\n opts: Required<HeapSampleOptions>,\n): Promise<void> {\n const { samplingInterval, stackDepth } = opts;\n const base = { samplingInterval, stackDepth };\n const params = {\n ...base,\n includeObjectsCollectedByMinorGC: opts.includeMinorGC,\n includeObjectsCollectedByMajorGC: opts.includeMajorGC,\n };\n\n try {\n await 
session.post(\"HeapProfiler.startSampling\", params);\n } catch {\n console.warn(\n \"HeapProfiler: include-collected params not supported, falling back\",\n );\n await session.post(\"HeapProfiler.startSampling\", base);\n }\n}\n\nasync function stopSampling(session: Session): Promise<HeapProfile> {\n const { profile } = await session.post(\"HeapProfiler.stopSampling\");\n return profile as HeapProfile;\n}\n"],"mappings":";;;AAyBA,MAAM,iBAA8C;CAClD,kBAAkB;CAClB,YAAY;CACZ,gBAAgB;CAChB,gBAAgB;CACjB;;AAGD,eAAsB,iBACpB,SACA,IAC8C;CAC9C,MAAM,OAAO;EAAE,GAAG;EAAgB,GAAG;EAAS;CAC9C,MAAM,UAAU,IAAI,SAAS;AAC7B,SAAQ,SAAS;AAEjB,KAAI;AACF,QAAM,cAAc,SAAS,KAAK;AAGlC,SAAO;GAAE,QAFM,MAAM,IAAI;GAER,SADD,MAAM,aAAa,QAAQ;GACjB;WAClB;AACR,UAAQ,YAAY;;;;AAKxB,eAAe,cACb,SACA,MACe;CACf,MAAM,EAAE,kBAAkB,eAAe;CACzC,MAAM,OAAO;EAAE;EAAkB;EAAY;CAC7C,MAAM,SAAS;EACb,GAAG;EACH,kCAAkC,KAAK;EACvC,kCAAkC,KAAK;EACxC;AAED,KAAI;AACF,QAAM,QAAQ,KAAK,8BAA8B,OAAO;SAClD;AACN,UAAQ,KACN,qEACD;AACD,QAAM,QAAQ,KAAK,8BAA8B,KAAK;;;AAI1D,eAAe,aAAa,SAAwC;CAClE,MAAM,EAAE,YAAY,MAAM,QAAQ,KAAK,4BAA4B;AACnE,QAAO"}
|
|
@@ -0,0 +1,599 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import { fileURLToPath } from "node:url";
|
|
3
|
+
import { getHeapStatistics } from "node:v8";
|
|
4
|
+
|
|
5
|
+
//#region src/matrix/VariantLoader.ts
|
|
6
|
+
/**
 * Discover variant ids: every `.ts` file directly in the directory,
 * extension stripped, sorted alphabetically. `dirUrl` is a file:// URL.
 */
async function discoverVariants(dirUrl) {
  const entries = await fs.readdir(fileURLToPath(dirUrl), { withFileTypes: true });
  const ids = [];
  for (const entry of entries) {
    if (entry.isFile() && entry.name.endsWith(".ts")) {
      ids.push(entry.name.slice(0, -3));
    }
  }
  return ids.sort();
}
|
|
11
|
+
/** Absolute module URL for `<variantId>.ts` resolved against dirUrl. */
function variantModuleUrl(dirUrl, variantId) {
  const resolved = new URL(variantId + ".ts", dirUrl);
  return resolved.href;
}
|
|
15
|
+
|
|
16
|
+
//#endregion
|
|
17
|
+
//#region src/StatisticalUtils.ts
|
|
18
|
+
const bootstrapSamples = 1e4;
|
|
19
|
+
const confidence = .95;
|
|
20
|
+
/** Coefficient of variation: stddev relative to the mean (0 when mean is 0). */
function coefficientOfVariation(samples) {
  const mean = average(samples);
  return mean === 0 ? 0 : standardDeviation(samples) / mean;
}
|
|
26
|
+
/** Median absolute deviation: median of |x - median|, a robust spread measure. */
function medianAbsoluteDeviation(samples) {
  const med = percentile$1(samples, .5);
  const deviations = samples.map((x) => Math.abs(x - med));
  return percentile$1(deviations, .5);
}
|
|
31
|
+
/** Arithmetic mean of values (NaN for an empty array). */
function average(values) {
  let total = 0;
  for (const v of values) total += v;
  return total / values.length;
}
|
|
35
|
+
/** Sample standard deviation with Bessel's n-1 correction; 0 for fewer than 2 samples. */
function standardDeviation(samples) {
  if (samples.length <= 1) return 0;
  const mean = average(samples);
  let sumSq = 0;
  for (const x of samples) sumSq += (x - mean) ** 2;
  return Math.sqrt(sumSq / (samples.length - 1));
}
|
|
42
|
+
/** Nearest-rank percentile: value at fraction p (0-1) of the sorted data. */
function percentile$1(values, p) {
  const ordered = Array.from(values).sort((x, y) => x - y);
  const rank = Math.ceil(ordered.length * p) - 1;
  return ordered[rank < 0 ? 0 : rank];
}
|
|
48
|
+
/** Bootstrap resample: n draws with replacement from the input samples. */
function createResample(samples) {
  const n = samples.length;
  const out = new Array(n);
  for (let i = 0; i < n; i++) {
    out[i] = samples[Math.floor(Math.random() * n)];
  }
  return out;
}
|
|
54
|
+
/** Two-sided confidence interval [lower, upper] from bootstrap medians. */
function computeInterval(medians, confidence) {
  const tail = (1 - confidence) / 2;
  const lower = percentile$1(medians, tail);
  const upper = percentile$1(medians, 1 - tail);
  return [lower, upper];
}
|
|
59
|
+
/**
 * Bin values into a fixed-width histogram for compact visualization.
 * @param values numeric samples (order irrelevant; input is not mutated)
 * @param binCount number of equal-width bins (default 30)
 * @returns array of { x: bin center, count }. Empty input yields [] (the
 *          original returned [{ x: undefined, count: 0 }] because
 *          sorted[0] and sorted[-1] are both undefined and compare equal);
 *          identical values yield a single zero-width bin.
 */
function binValues(values, binCount = 30) {
  if (values.length === 0) return [];
  const sorted = [...values].sort((a, b) => a - b);
  const min = sorted[0];
  const max = sorted[sorted.length - 1];
  if (min === max) return [{
    x: min,
    count: values.length
  }];
  const step = (max - min) / binCount;
  const counts = new Array(binCount).fill(0);
  for (const v of values) {
    // clamp so v === max lands in the last bin instead of overflowing
    const bin = Math.min(Math.floor((v - min) / step), binCount - 1);
    counts[bin]++;
  }
  return counts.map((count, i) => ({
    x: min + (i + .5) * step,
    count
  }));
}
|
|
79
|
+
/**
 * Bootstrap confidence interval for the percentage difference between
 * baseline and current medians.
 * @param baseline baseline timing samples
 * @param current current timing samples
 * @param options optional { resamples, confidence }; defaults to the
 *                module constants (10000 resamples, 0.95 confidence)
 * @returns { percent, ci: [lo, hi], direction, histogram } where percent is
 *          the observed median change (negative = current faster) and
 *          direction is "faster"/"slower" only when the CI excludes zero,
 *          otherwise "uncertain".
 * NOTE: uses Math.random via createResample, so results vary slightly
 * between runs.
 */
function bootstrapDifferenceCI(baseline, current, options = {}) {
  const { resamples = bootstrapSamples, confidence: conf = confidence } = options;
  // observed (non-resampled) percent change of the medians
  const baselineMedian = percentile$1(baseline, .5);
  const observedPercent = (percentile$1(current, .5) - baselineMedian) / baselineMedian * 100;
  const diffs = [];
  // resample both sides independently; record each resampled median difference
  for (let i = 0; i < resamples; i++) {
    const resB = createResample(baseline);
    const resC = createResample(current);
    const medB = percentile$1(resB, .5);
    const medC = percentile$1(resC, .5);
    diffs.push((medC - medB) / medB * 100);
  }
  const ci = computeInterval(diffs, conf);
  // a CI straddling zero means the difference is not statistically significant
  const excludesZero = ci[0] > 0 || ci[1] < 0;
  let direction = "uncertain";
  if (excludesZero) direction = observedPercent < 0 ? "faster" : "slower";
  const histogram = binValues(diffs);
  return {
    percent: observedPercent,
    ci,
    direction,
    histogram
  };
}
|
|
104
|
+
|
|
105
|
+
//#endregion
|
|
106
|
+
//#region src/runners/RunnerUtils.ts
|
|
107
|
+
const msToNs = 1e6;
|
|
108
|
+
|
|
109
|
+
//#endregion
|
|
110
|
+
//#region src/runners/AdaptiveWrapper.ts
|
|
111
|
+
const minTime = 1e3;
|
|
112
|
+
const maxTime = 1e4;
|
|
113
|
+
const targetConfidence = 95;
|
|
114
|
+
const fallbackThreshold = 80;
|
|
115
|
+
const windowSize = 50;
|
|
116
|
+
const stability = .05;
|
|
117
|
+
const initialBatch = 100;
|
|
118
|
+
const continueBatch = 100;
|
|
119
|
+
const continueIterations = 10;
|
|
120
|
+
/** Wrap a base runner so runBench samples adaptively until convergence. */
function createAdaptiveWrapper(baseRunner, options) {
  return {
    async runBench(benchmark, runnerOptions, params) {
      return runAdaptiveBench(baseRunner, benchmark, runnerOptions, options, params);
    }
  };
}
|
|
126
|
+
/**
 * Run a benchmark with adaptive sampling: one initial batch (with warmup),
 * then repeated small batches until convergence or the time budget runs out.
 * Limit precedence: runnerOptions override wrapper `options`, which override
 * the module defaults (targetConfidence falls back to options.convergence).
 */
async function runAdaptiveBench(baseRunner, benchmark, runnerOptions, options, params) {
  const { minTime: min = options.minTime ?? minTime, maxTime: max = options.maxTime ?? maxTime, targetConfidence: target = options.convergence ?? targetConfidence } = runnerOptions;
  const allSamples = [];
  // initial batch performs warmup; keep its warmup samples for the report
  const warmup = await collectInitial(baseRunner, benchmark, runnerOptions, params, allSamples);
  // the clock starts after the initial batch: maxTime bounds only the adaptive phase
  const startTime = performance.now();
  await collectAdaptive(baseRunner, benchmark, runnerOptions, params, allSamples, {
    minTime: min,
    maxTime: max,
    targetConfidence: target,
    startTime
  });
  // convergence is evaluated on ns values; allSamples are stored in ms
  return buildResults(allSamples, startTime, checkConvergence(allSamples.map((s) => s * msToNs)), benchmark.name, warmup);
}
|
|
140
|
+
/**
 * Run one initial fixed-duration batch (warmup included, no iteration cap)
 * and append its samples to the accumulator.
 * @returns the batch's warmup samples
 */
async function collectInitial(baseRunner, benchmark, runnerOptions, params, allSamples) {
  const batchOptions = {
    ...runnerOptions,
    maxTime: initialBatch,
    maxIterations: void 0
  };
  const [first] = await baseRunner.runBench(benchmark, batchOptions, params);
  appendSamples(first, allSamples);
  return first.warmupSamples;
}
|
|
151
|
+
/**
 * Keep collecting small sample batches until shouldStop says so (converged at
 * target confidence, or past minTime with confidence above the fallback
 * threshold) or until maxTime elapses. Writes a one-line progress update to
 * stderr at most once per second and clears it before returning.
 */
async function collectAdaptive(baseRunner, benchmark, runnerOptions, params, allSamples, limits) {
  const { minTime, maxTime, targetConfidence, startTime } = limits;
  let lastLog = 0;
  while (performance.now() - startTime < maxTime) {
    // convergence check expects ns; samples are stored in ms
    const convergence = checkConvergence(allSamples.map((s) => s * msToNs));
    const elapsed = performance.now() - startTime;
    if (elapsed - lastLog > 1e3) {
      const elapsedSec = (elapsed / 1e3).toFixed(1);
      const conf = convergence.confidence.toFixed(0);
      process.stderr.write(`\r◊ ${benchmark.name}: ${conf}% confident (${elapsedSec}s) `);
      lastLog = elapsed;
    }
    if (shouldStop(convergence, targetConfidence, elapsed, minTime)) break;
    // small follow-up batch: skip warmup, bound by both time and iterations
    const opts = {
      ...runnerOptions,
      maxTime: continueBatch,
      maxIterations: continueIterations,
      skipWarmup: true
    };
    appendSamples((await baseRunner.runBench(benchmark, opts, params))[0], allSamples);
  }
  // erase the progress line from stderr
  process.stderr.write("\r" + " ".repeat(60) + "\r");
}
|
|
175
|
+
/**
 * Push each sample individually — spreading a very large array into push()
 * can overflow the call stack. No-op when the result carries no samples.
 */
function appendSamples(result, samples) {
  const source = result.samples;
  if (!source || source.length === 0) return;
  for (let i = 0; i < source.length; i++) samples.push(source[i]);
}
|
|
180
|
+
/**
 * Stop when converged at the target confidence, or — once minTime has
 * elapsed — when confidence clears the (higher of target and fallback)
 * threshold.
 */
function shouldStop(convergence, targetConfidence, elapsedTime, minTime) {
  if (convergence.converged && convergence.confidence >= targetConfidence) return true;
  if (elapsedTime < minTime) return false;
  return convergence.confidence >= Math.max(targetConfidence, fallbackThreshold);
}
|
|
186
|
+
/**
 * Assemble the single-element results array from accumulated ms samples.
 * totalTime is wall-clock seconds of the adaptive phase; time stats are
 * computed from ns-converted samples.
 */
function buildResults(samplesMs, startTime, convergence, name, warmupSamples) {
  const totalTime = (performance.now() - startTime) / 1e3;
  return [{
    name,
    samples: samplesMs,
    warmupSamples,
    time: computeTimeStats(samplesMs.map((s) => s * msToNs)),
    totalTime,
    convergence
  }];
}
|
|
198
|
+
/**
 * Aggregate nanosecond samples into millisecond timing statistics:
 * min/max/avg plus percentile and robust-variability summaries.
 */
function computeTimeStats(samplesNs) {
  const samplesMs = samplesNs.map((s) => s / msToNs);
  const { min, max, sum } = getMinMaxSum(samplesNs);
  return {
    min: min / msToNs,
    max: max / msToNs,
    avg: sum / samplesNs.length / msToNs,
    ...getPercentiles(samplesNs),
    ...getRobustMetrics(samplesMs)
  };
}
|
|
212
|
+
/**
 * Min, max, and sum of a numeric array, computed in a single pass (the
 * original made three separate reduce() passes over the same data — this is
 * a hot path fed with the full sample array).
 * Empty input yields { min: Infinity, max: -Infinity, sum: 0 }, matching
 * the reduce-with-identity behavior it replaces.
 */
function getMinMaxSum(samples) {
  let min = Number.POSITIVE_INFINITY;
  let max = Number.NEGATIVE_INFINITY;
  let sum = 0;
  for (const s of samples) {
    if (s < min) min = s;
    if (s > max) max = s;
    sum += s;
  }
  return { min, max, sum };
}
|
|
220
|
+
/** Millisecond percentile summary computed from nanosecond samples. */
function getPercentiles(samples) {
  const at = (p) => percentile$1(samples, p) / msToNs;
  return {
    p25: at(.25),
    p50: at(.5),
    p75: at(.75),
    p95: at(.95),
    p99: at(.99),
    p999: at(.999)
  };
}
|
|
231
|
+
/** Robust variability metrics: coefficient of variation, MAD, and outlier time ratio. */
function getRobustMetrics(samplesMs) {
  const { ratio } = getOutlierImpact(samplesMs);
  return {
    cv: coefficientOfVariation(samplesMs),
    mad: medianAbsoluteDeviation(samplesMs),
    outlierRate: ratio
  };
}
|
|
240
|
+
/**
 * Share of total time attributable to outliers. A sample is an outlier when
 * it exceeds median + 1.5 * (p75 - median); only its excess over the median
 * counts toward the ratio.
 */
function getOutlierImpact(samples) {
  if (samples.length === 0) return { ratio: 0, count: 0 };
  const median = percentile$1(samples, .5);
  const cutoff = median + 1.5 * (percentile$1(samples, .75) - median);
  let excess = 0;
  let count = 0;
  let total = 0;
  for (const sample of samples) {
    total += sample;
    if (sample > cutoff) {
      excess += sample - median;
      count++;
    }
  }
  return {
    ratio: total > 0 ? excess / total : 0,
    count
  };
}
|
|
260
|
+
/**
 * Assess convergence by comparing the two most recent sample windows;
 * reports collection progress until twice the window size is available.
 */
function checkConvergence(samples) {
  const win = getWindowSize(samples);
  const needed = win * 2;
  if (samples.length < needed) return buildProgressResult(samples.length, needed);
  return buildConvergence(getStability(samples, win));
}
|
|
267
|
+
/** Not enough samples yet: report linear progress toward the minimum count. */
function buildProgressResult(currentSamples, minSamples) {
  const confidence = currentSamples / minSamples * 100;
  return {
    converged: false,
    confidence,
    reason: `Collecting samples: ${currentSamples}/${minSamples}`
  };
}
|
|
275
|
+
/**
 * Compare the two most recent sample windows (input in ns, compared in ms)
 * and report drift in the median and in outlier impact, plus whether each is
 * within the `stability` threshold.
 * Fix: guard the relative-median computation — when the previous window's
 * median is 0 the original divided by zero, yielding NaN (0/0) or Infinity
 * drift; NaN then made `medianStable` false and propagated a NaN confidence
 * into buildConvergence and the progress log. Now a 0 → 0 median counts as
 * no drift, and 0 → non-zero as infinite drift (never stable).
 */
function getStability(samples, windowSize) {
  const recent = samples.slice(-windowSize);
  const previous = samples.slice(-windowSize * 2, -windowSize);
  const recentMs = recent.map((s) => s / msToNs);
  const previousMs = previous.map((s) => s / msToNs);
  const medianRecent = percentile$1(recentMs, .5);
  const medianPrevious = percentile$1(previousMs, .5);
  const medianDrift = medianPrevious === 0
    ? (medianRecent === 0 ? 0 : Number.POSITIVE_INFINITY)
    : Math.abs(medianRecent - medianPrevious) / medianPrevious;
  const impactRecent = getOutlierImpact(recentMs);
  const impactPrevious = getOutlierImpact(previousMs);
  const impactDrift = Math.abs(impactRecent.ratio - impactPrevious.ratio);
  return {
    medianDrift,
    impactDrift,
    medianStable: medianDrift < stability,
    impactStable: impactDrift < stability
  };
}
|
|
294
|
+
/**
 * Translate window-stability metrics into a convergence verdict. Fully
 * stable windows are 100% confident; otherwise confidence degrades linearly
 * with each drift relative to the stability threshold, clamped to [0, 100].
 */
function buildConvergence(metrics) {
  const { medianDrift, impactDrift, medianStable, impactStable } = metrics;
  if (medianStable && impactStable) {
    return {
      converged: false || true,
      confidence: 100,
      reason: "Stable performance pattern"
    };
  }
  const medianScore = (1 - medianDrift / stability) * 50;
  const impactScore = (1 - impactDrift / stability) * 50;
  const reason = medianDrift > impactDrift
    ? `Median drifting: ${(medianDrift * 100).toFixed(1)}%`
    : `Outlier impact changing: ${(impactDrift * 100).toFixed(1)}%`;
  return {
    converged: false,
    confidence: Math.max(0, Math.min(100, medianScore + impactScore)),
    reason
  };
}
|
|
310
|
+
/**
 * Pick a comparison-window size scaled to execution speed: faster benchmarks
 * (smaller recent median, in ms) get larger windows. Falls back to the base
 * window size until 20 samples exist.
 */
function getWindowSize(samples) {
  if (samples.length < 20) return windowSize;
  const recentMs = samples.slice(-20).map((s) => s / msToNs);
  const recentMedian = percentile$1(recentMs, .5);
  if (recentMedian < .01) return 200;
  if (recentMedian < .1) return 100;
  if (recentMedian < 1) return 50;
  return recentMedian < 10 ? 30 : 20;
}
|
|
320
|
+
|
|
321
|
+
//#endregion
|
|
322
|
+
//#region src/runners/BenchRunner.ts
|
|
323
|
+
/** Execute benchmark with optional parameters. The return value of
 * benchmark.fn is discarded — callers only time the call itself. */
function executeBenchmark(benchmark, params) {
  benchmark.fn(params);
}
|
|
327
|
+
|
|
328
|
+
//#endregion
|
|
329
|
+
//#region src/runners/BasicRunner.ts
|
|
330
|
+
/**
|
|
331
|
+
* Wait time after gc() for V8 to stabilize (ms).
|
|
332
|
+
*
|
|
333
|
+
* V8 has 4 compilation tiers: Ignition (interpreter) -> Sparkplug (baseline) ->
|
|
334
|
+
* Maglev (mid-tier optimizer) -> TurboFan (full optimizer). Tiering thresholds:
|
|
335
|
+
* - Ignition -> Sparkplug: 8 invocations
|
|
336
|
+
* - Sparkplug -> Maglev: 500 invocations
|
|
337
|
+
* - Maglev -> TurboFan: 6000 invocations
|
|
338
|
+
*
|
|
339
|
+
* Optimization compilation happens on background threads and requires idle time
|
|
340
|
+
* on the main thread to complete. Without sufficient warmup + settle time,
|
|
341
|
+
* benchmarks exhibit bimodal timing: slow Sparkplug samples (~30% slower) mixed
|
|
342
|
+
* with fast optimized samples.
|
|
343
|
+
*
|
|
344
|
+
* The warmup iterations trigger the optimization decision, then gcSettleTime
|
|
345
|
+
* provides idle time for background compilation to finish before measurement.
|
|
346
|
+
*
|
|
347
|
+
* @see https://v8.dev/blog/sparkplug
|
|
348
|
+
* @see https://v8.dev/blog/maglev
|
|
349
|
+
* @see https://v8.dev/blog/background-compilation
|
|
350
|
+
*/
|
|
351
|
+
const gcSettleTime = 1e3;
|
|
352
|
+
/** Runner that measures a benchmark under time and iteration limits. */
var BasicRunner = class {
  /** Run one benchmark; returns a single-element results array. */
  async runBench(benchmark, options, params) {
    const collectOptions = {
      benchmark,
      params,
      ...defaultCollectOptions,
      ...options
    };
    const collected = await collectSamples(collectOptions);
    return [buildMeasuredResults(benchmark.name, collected)];
  }
};
|
|
364
|
+
const defaultCollectOptions = {
|
|
365
|
+
maxTime: 5e3,
|
|
366
|
+
maxIterations: 1e6,
|
|
367
|
+
warmup: 0,
|
|
368
|
+
traceOpt: false,
|
|
369
|
+
noSettle: false
|
|
370
|
+
};
|
|
371
|
+
/** Shape collected samples into the runner's result record.
 * heapSize is degenerate here: avg/min/max all carry the same amortized
 * per-sample heap growth figure from collectSamples. */
function buildMeasuredResults(name, c) {
  const time = computeStats(c.samples);
  // convergence expects ns; samples are stored in ms
  const convergence = checkConvergence(c.samples.map((s) => s * msToNs));
  return {
    name,
    samples: c.samples,
    warmupSamples: c.warmupSamples,
    heapSamples: c.heapSamples,
    timestamps: c.timestamps,
    time,
    heapSize: {
      avg: c.heapGrowth,
      min: c.heapGrowth,
      max: c.heapGrowth
    },
    convergence,
    optStatus: c.optStatus,
    optSamples: c.optSamples,
    pausePoints: c.pausePoints
  };
}
|
|
392
|
+
/**
 * Run warmup (unless skipped) and the main sample loop.
 * @returns timing samples, amortized heap growth (KiB per sample), heap and
 *          timestamp tracks, and optional V8 opt-status data when traceOpt
 * @throws when neither maxIterations nor maxTime is set, or when no samples
 *         were collected
 */
async function collectSamples(p) {
  if (!p.maxIterations && !p.maxTime) throw new Error(`At least one of maxIterations or maxTime must be set`);
  const warmupSamples = p.skipWarmup ? [] : await runWarmup(p);
  const heapBefore = process.memoryUsage().heapUsed;
  const { samples, heapSamples, timestamps, optStatuses, pausePoints } = await runSampleLoop(p);
  // clamped to 0 because GC during the run can shrink heapUsed; KiB per sample
  const heapGrowth = Math.max(0, process.memoryUsage().heapUsed - heapBefore) / 1024 / samples.length;
  if (samples.length === 0) throw new Error(`No samples collected for benchmark: ${p.benchmark.name}`);
  return {
    samples,
    warmupSamples,
    heapGrowth,
    heapSamples,
    timestamps,
    optStatus: p.traceOpt ? analyzeOptStatus(samples, optStatuses) : void 0,
    optSamples: p.traceOpt && optStatuses.length > 0 ? optStatuses : void 0,
    pausePoints
  };
}
|
|
411
|
+
/**
 * Run `warmup` timed iterations, then gc(); unless noSettle is set, also
 * wait gcSettleTime and gc() again so V8's background optimization can
 * finish before measurement (see the tiering notes on gcSettleTime).
 * @returns per-iteration warmup timings in ms (empty when warmup is 0)
 */
async function runWarmup(p) {
  const gc = gcFunction();
  const samples = new Array(p.warmup);
  for (let i = 0; i < p.warmup; i++) {
    const start = performance.now();
    executeBenchmark(p.benchmark, p.params);
    samples[i] = performance.now() - start;
  }
  gc();
  if (!p.noSettle) {
    // idle time lets background optimization compile; second gc() cleans up after it
    await new Promise((r) => setTimeout(r, gcSettleTime));
    gc();
  }
  return samples;
}
|
|
427
|
+
/**
 * Guess how many samples to pre-allocate: the iteration cap when set (and
 * non-zero), otherwise assume ~0.1 ms per sample over the time budget.
 */
function estimateSampleCount(maxTime, maxIterations) {
  if (maxIterations) return maxIterations;
  return Math.ceil(maxTime / .1);
}
|
|
431
|
+
/**
 * Pre-allocate result arrays (reduces GC churn mid-measurement); the
 * optional heap/opt tracks get a sized array only when their flag is on.
 */
function createSampleArrays(n, trackHeap, trackOpt) {
  const sized = (enabled) => (enabled ? new Array(n) : []);
  return {
    samples: new Array(n),
    timestamps: new Array(n),
    heapSamples: sized(trackHeap),
    optStatuses: sized(trackOpt),
    pausePoints: []
  };
}
|
|
442
|
+
/** Shrink the pre-allocated arrays down to the samples actually collected. */
function trimArrays(a, count, trackHeap, trackOpt) {
  a.samples.length = count;
  a.timestamps.length = count;
  if (trackHeap) a.heapSamples.length = count;
  if (trackOpt) a.optStatuses.length = count;
}
|
|
448
|
+
/**
 * Main measurement loop. Each iteration times one benchmark execution and
 * records a completion timestamp (µs), the current heap size, and — when
 * traceOpt is on and natives syntax is available — the V8 optimization
 * status. Periodic pauses give V8 idle time; pause durations are excluded
 * from the elapsed-time budget.
 */
async function runSampleLoop(p) {
  const { maxTime, maxIterations, pauseFirst, pauseInterval = 0, pauseDuration = 100 } = p;
  const trackHeap = true; // heap tracking is unconditional in this build
  const getOptStatus = p.traceOpt ? createOptStatusGetter() : void 0;
  const a = createSampleArrays(estimateSampleCount(maxTime, maxIterations), trackHeap, !!getOptStatus);
  let count = 0;
  let elapsed = 0;
  let totalPauseTime = 0;
  const loopStart = performance.now();
  while ((!maxIterations || count < maxIterations) && (!maxTime || elapsed < maxTime)) {
    const start = performance.now();
    executeBenchmark(p.benchmark, p.params);
    const end = performance.now();
    a.samples[count] = end - start;
    // hrtime ns -> µs, captured after the sample so it marks completion time
    a.timestamps[count] = Number(process.hrtime.bigint() / 1000n);
    a.heapSamples[count] = getHeapStatistics().used_heap_size;
    if (getOptStatus) a.optStatuses[count] = getOptStatus(p.benchmark.fn);
    count++;
    if (shouldPause(count, pauseFirst, pauseInterval)) {
      a.pausePoints.push({
        sampleIndex: count - 1,
        durationMs: pauseDuration
      });
      const pauseStart = performance.now();
      await new Promise((r) => setTimeout(r, pauseDuration));
      totalPauseTime += performance.now() - pauseStart;
    }
    // elapsed excludes pause time so pauses don't consume the time budget
    elapsed = performance.now() - loopStart - totalPauseTime;
  }
  trimArrays(a, count, trackHeap, !!getOptStatus);
  return {
    samples: a.samples,
    heapSamples: a.heapSamples,
    timestamps: a.timestamps,
    optStatuses: a.optStatuses,
    pausePoints: a.pausePoints
  };
}
|
|
487
|
+
/**
 * Decide whether to pause after this iteration to give V8 idle time.
 * Pauses at iteration `first` (when set) and every `interval` iterations
 * after it; with no `first`, every `interval` iterations from the start.
 * Fix: the original's `(iter - first) % interval === 0` also fired on
 * iterations BEFORE `first` (negative difference, and -0 === 0 in JS),
 * triggering interval pauses ahead of the configured first pause point;
 * iterations below `first` now never pause.
 */
function shouldPause(iter, first, interval) {
  if (first !== void 0 && iter === first) return true;
  if (interval <= 0) return false;
  if (first === void 0) return iter % interval === 0;
  if (iter < first) return false;
  return (iter - first) % interval === 0;
}
|
|
494
|
+
/** Basic timing statistics over raw samples, using interpolated percentiles. */
function computeStats(samples) {
  const sorted = [...samples].sort((a, b) => a - b);
  let total = 0;
  for (const s of samples) total += s;
  return {
    min: sorted[0],
    max: sorted[sorted.length - 1],
    avg: total / samples.length,
    p50: percentile(sorted, .5),
    p75: percentile(sorted, .75),
    p99: percentile(sorted, .99),
    p999: percentile(sorted, .999)
  };
}
|
|
508
|
+
/**
 * Percentile with linear interpolation between the two nearest ranks.
 * Expects an ascending, already-sorted array.
 */
function percentile(sortedArray, p) {
  const pos = (sortedArray.length - 1) * p;
  const lo = Math.floor(pos);
  const hi = Math.ceil(pos);
  if (hi >= sortedArray.length) return sortedArray[sortedArray.length - 1];
  const frac = pos % 1;
  return sortedArray[lo] * (1 - frac) + sortedArray[hi] * frac;
}
|
|
517
|
+
/**
 * Resolve the runtime's exposed gc() hook; warns and returns a no-op when
 * the process was not started with --expose-gc.
 */
function gcFunction() {
  const exposed = globalThis.gc || globalThis.__gc;
  if (exposed) return exposed;
  console.warn("gc() not available, run node/bun with --expose-gc");
  return () => {};
}
|
|
524
|
+
/**
 * Try to build a %GetOptimizationStatus probe (requires node
 * --allow-natives-syntax); returns undefined when natives syntax is
 * unavailable — the trial invocation below throws in that case.
 */
function createOptStatusGetter() {
  try {
    const probe = new Function("f", "return %GetOptimizationStatus(f)");
    probe(() => {});
    return probe;
  } catch {
    return;
  }
}
|
|
534
|
+
/**
|
|
535
|
+
* V8 optimization status bit meanings:
|
|
536
|
+
* Bit 0 (1): is_function
|
|
537
|
+
* Bit 4 (16): is_optimized (TurboFan)
|
|
538
|
+
* Bit 5 (32): is_optimized (Maglev)
|
|
539
|
+
* Bit 7 (128): is_baseline (Sparkplug)
|
|
540
|
+
* Bit 3 (8): maybe_deoptimized
|
|
541
|
+
*/
|
|
542
|
+
const statusNames = {
|
|
543
|
+
1: "interpreted",
|
|
544
|
+
129: "sparkplug",
|
|
545
|
+
17: "turbofan",
|
|
546
|
+
33: "maglev",
|
|
547
|
+
49: "turbofan+maglev",
|
|
548
|
+
32769: "optimized"
|
|
549
|
+
};
|
|
550
|
+
/**
 * Group samples by their V8 optimization-status code and summarize each
 * tier (sample count + median ms); deoptCount tallies samples with the
 * maybe_deoptimized bit (8) set. Returns undefined when no statuses were
 * captured (natives syntax unavailable).
 */
function analyzeOptStatus(samples, statuses) {
  if (statuses.length === 0 || statuses[0] === void 0) return void 0;
  const byStatusCode = /* @__PURE__ */ new Map();
  let deoptCount = 0;
  for (let i = 0; i < samples.length; i++) {
    const status = statuses[i];
    if (status === void 0) continue;
    if (status & 8) deoptCount++; // bit 3: maybe_deoptimized
    if (!byStatusCode.has(status)) byStatusCode.set(status, []);
    byStatusCode.get(status).push(samples[i]);
  }
  const byTier = {};
  for (const [status, times] of byStatusCode) {
    // fall back to the raw code for status combinations not in statusNames
    const name = statusNames[status] || `status=${status}`;
    const sorted = [...times].sort((a, b) => a - b);
    const median = sorted[Math.floor(sorted.length / 2)];
    byTier[name] = {
      count: times.length,
      medianMs: median
    };
  }
  return {
    byTier,
    deoptCount
  };
}
|
|
577
|
+
|
|
578
|
+
//#endregion
|
|
579
|
+
//#region src/runners/CreateRunner.ts
|
|
580
|
+
/** @return benchmark runner */
|
|
581
|
+
async function createRunner(_runnerName) {
|
|
582
|
+
return new BasicRunner();
|
|
583
|
+
}
|
|
584
|
+
|
|
585
|
+
//#endregion
|
|
586
|
+
//#region src/runners/TimingUtils.ts
|
|
587
|
+
const debugWorkerTiming = false;
|
|
588
|
+
/** Timing-debug stub: instrumentation is disabled in this build, always 0. */
function getPerfNow() {
  return 0;
}
|
|
592
|
+
/** Timing-debug stub: elapsed-time reporting is disabled, always 0. */
function getElapsed(startMark, endMark) {
  return 0;
}
|
|
596
|
+
|
|
597
|
+
//#endregion
|
|
598
|
+
export { BasicRunner as a, createAdaptiveWrapper as c, bootstrapDifferenceCI as d, discoverVariants as f, createRunner as i, msToNs as l, getElapsed as n, computeStats as o, variantModuleUrl as p, getPerfNow as r, checkConvergence as s, debugWorkerTiming as t, average as u };
|
|
599
|
+
//# sourceMappingURL=TimingUtils-D4z1jpp2.mjs.map
|