perfshield 0.0.7 → 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/lib/runner.js +110 -93
  2. package/package.json +1 -1
package/lib/runner.js CHANGED
@@ -12,6 +12,23 @@ const getVersionOrder = seed => {
12
12
  }
13
13
  return [versions[1], versions[0]];
14
14
  };
15
+ const buildIndexOrder = (count, seed) => {
16
+ const order = Array.from({
17
+ length: count
18
+ }, (_, index) => index);
19
+ if (count <= 1) {
20
+ return order;
21
+ }
22
+ let state = seed >>> 0;
23
+ for (let i = count - 1; i > 0; i -= 1) {
24
+ state = state * 1_664_525 + 1_013_904_223 >>> 0;
25
+ const j = state % (i + 1);
26
+ const swap = order[i];
27
+ order[i] = order[j];
28
+ order[j] = swap;
29
+ }
30
+ return order;
31
+ };
15
32
  const sleep = async delayMs => {
16
33
  if (delayMs <= 0) {
17
34
  return;
@@ -20,43 +37,37 @@ const sleep = async delayMs => {
20
37
  setTimeout(resolve, delayMs);
21
38
  });
22
39
  };
23
- const warmupBenchmarks = async (harness, benchmarks, delayMs, progress) => {
24
- const warmups = [];
25
- for (let index = 0; index < benchmarks.length; index += 1) {
26
- const descriptor = benchmarks[index];
27
- const order = getVersionOrder(index);
28
- let baselineSample;
29
- let currentSample;
30
- for (const version of order) {
31
- const result = await harness.runSample({
32
- index,
33
- iterations: descriptor.iterations,
34
- version
35
- });
36
- if (version === "baseline") {
37
- baselineSample = result.durationMs;
38
- } else {
39
- currentSample = result.durationMs;
40
- }
41
- }
42
- if (baselineSample == null || currentSample == null) {
43
- throw new Error("Warmup did not collect baseline/current samples.");
44
- }
45
- warmups.push({
46
- baseline: baselineSample,
47
- current: currentSample
40
+ const warmupBenchmark = async (harness, benchmark, index, delayMs, progress) => {
41
+ const order = getVersionOrder(index);
42
+ let baselineSample;
43
+ let currentSample;
44
+ for (const version of order) {
45
+ const result = await harness.runSample({
46
+ index,
47
+ iterations: benchmark.iterations,
48
+ version
48
49
  });
49
- if (progress) {
50
- progress({
51
- benchmarkCount: benchmarks.length,
52
- benchmarkIndex: index,
53
- benchmarkName: descriptor.name,
54
- phase: "warmup"
55
- });
50
+ if (version === "baseline") {
51
+ baselineSample = result.durationMs;
52
+ } else {
53
+ currentSample = result.durationMs;
56
54
  }
57
- await sleep(delayMs);
58
55
  }
59
- return warmups;
56
+ if (baselineSample == null || currentSample == null) {
57
+ throw new Error("Warmup did not collect baseline/current samples.");
58
+ }
59
+ if (progress) {
60
+ progress({
61
+ benchmarkIndex: index,
62
+ benchmarkName: benchmark.name,
63
+ phase: "warmup"
64
+ });
65
+ }
66
+ await sleep(delayMs);
67
+ return {
68
+ baseline: baselineSample,
69
+ current: currentSample
70
+ };
60
71
  };
61
72
  const computeIterationOverrides = (benchmarks, warmups, minTimeMs) => {
62
73
  if (minTimeMs <= 0) {
@@ -121,35 +132,26 @@ const runSamplePair = async (harness, index, iterations, order) => {
121
132
  current: currentSample
122
133
  };
123
134
  };
124
- const collectSamples = async (harness, benchmarks, minSamples, iterationOverrides, delayMs, minTimeMs, samples, progress) => {
125
- const buckets = samples ?? benchmarks.map(() => ({
126
- baseline: [],
127
- current: []
128
- }));
129
- let completed = 0;
130
- const total = minSamples * benchmarks.length;
135
+ const collectSamplesForBenchmark = async (harness, benchmark, index, minSamples, iterationOverrides, delayMs, minTimeMs, bucket, progress, progressState) => {
131
136
  for (let iteration = 0; iteration < minSamples; iteration += 1) {
132
137
  const order = getVersionOrder(iteration);
133
- for (let index = 0; index < benchmarks.length; index += 1) {
134
- const iterations = iterationOverrides[index];
135
- const minimumIterations = benchmarks[index].iterations ?? 1;
136
- const result = await runSamplePair(harness, index, iterations, order);
137
- buckets[index].baseline.push(result.baseline);
138
- buckets[index].current.push(result.current);
139
- const nextIterations = updateIterations(iterations ?? minimumIterations, result.baseline, result.current, minTimeMs, minimumIterations);
140
- iterationOverrides[index] = nextIterations;
141
- completed += 1;
142
- if (progress) {
143
- progress({
144
- completed,
145
- phase: "samples",
146
- total
147
- });
148
- }
149
- await sleep(delayMs);
138
+ const iterations = iterationOverrides[index];
139
+ const minimumIterations = benchmark.iterations ?? 1;
140
+ const result = await runSamplePair(harness, index, iterations, order);
141
+ bucket.baseline.push(result.baseline);
142
+ bucket.current.push(result.current);
143
+ const nextIterations = updateIterations(iterations ?? minimumIterations, result.baseline, result.current, minTimeMs, minimumIterations);
144
+ iterationOverrides[index] = nextIterations;
145
+ if (progress && progressState) {
146
+ progressState.completed += 1;
147
+ progress({
148
+ completed: progressState.completed,
149
+ phase: "samples",
150
+ total: progressState.total
151
+ });
150
152
  }
153
+ await sleep(delayMs);
151
154
  }
152
- return buckets;
153
155
  };
154
156
  const intervalContains = (interval, value) => interval.low <= value && value <= interval.high;
155
157
  const autoSampleResolved = (samples, conditions, maxRelativeMargin) => samples.every(bucket => {
@@ -171,35 +173,33 @@ const autoSampleResolved = (samples, conditions, maxRelativeMargin) => samples.e
171
173
  }
172
174
  return true;
173
175
  });
174
- const autoSample = async (harness, benchmarks, samples, conditions, maxRelativeMargin, iterationOverrides, delayMs, minTimeMs, progress, timeoutMs) => {
176
+ const autoSampleForBenchmark = async (harness, benchmark, index, bucket, conditions, maxRelativeMargin, iterationOverrides, delayMs, minTimeMs, progress, timeoutMs) => {
175
177
  const startTime = Date.now();
176
178
  let roundRobinSeed = 0;
177
179
  let completed = 0;
178
180
  while (Date.now() - startTime < timeoutMs) {
179
- if (autoSampleResolved(samples, conditions, maxRelativeMargin)) {
181
+ if (autoSampleResolved([bucket], conditions, maxRelativeMargin)) {
180
182
  return;
181
183
  }
182
184
  for (let batch = 0; batch < autoSampleBatchSize; batch += 1) {
183
185
  const order = getVersionOrder(roundRobinSeed);
184
186
  roundRobinSeed += 1;
185
- for (let index = 0; index < benchmarks.length; index += 1) {
186
- const iterations = iterationOverrides[index];
187
- const minimumIterations = benchmarks[index].iterations ?? 1;
188
- const result = await runSamplePair(harness, index, iterations, order);
189
- samples[index].baseline.push(result.baseline);
190
- samples[index].current.push(result.current);
191
- const nextIterations = updateIterations(iterations ?? minimumIterations, result.baseline, result.current, minTimeMs, minimumIterations);
192
- iterationOverrides[index] = nextIterations;
193
- completed += 1;
194
- if (progress) {
195
- progress({
196
- completed,
197
- elapsedMs: Date.now() - startTime,
198
- phase: "autosample"
199
- });
200
- }
201
- await sleep(delayMs);
187
+ const iterations = iterationOverrides[index];
188
+ const minimumIterations = benchmark.iterations ?? 1;
189
+ const result = await runSamplePair(harness, index, iterations, order);
190
+ bucket.baseline.push(result.baseline);
191
+ bucket.current.push(result.current);
192
+ const nextIterations = updateIterations(iterations ?? minimumIterations, result.baseline, result.current, minTimeMs, minimumIterations);
193
+ iterationOverrides[index] = nextIterations;
194
+ completed += 1;
195
+ if (progress) {
196
+ progress({
197
+ completed,
198
+ elapsedMs: Date.now() - startTime,
199
+ phase: "autosample"
200
+ });
202
201
  }
202
+ await sleep(delayMs);
203
203
  }
204
204
  }
205
205
  };
@@ -223,24 +223,41 @@ export const runEngineComparison = async options => {
223
223
  const effectiveMinTimeMs = minTimeMs / Math.max(1, sampleScale * benchmarkScale);
224
224
  const delayMs = config.sampling.delayMs ?? 0;
225
225
  const maxRelativeMargin = config.sampling.maxRelativeMargin ?? defaultMaxRelativeMargin;
226
- const warmups = await warmupBenchmarks(harness, benchmarks, delayMs, options.progress);
227
- const iterationOverrides = computeIterationOverrides(benchmarks, warmups, effectiveMinTimeMs);
228
- const samples = warmups.map(warmup => ({
229
- baseline: [warmup.baseline],
230
- current: [warmup.current]
231
- }));
232
226
  const remainingSamples = Math.max(0, config.sampling.minSamples - 1);
233
- if (remainingSamples > 0) {
234
- await collectSamples(harness, benchmarks, remainingSamples, iterationOverrides, delayMs, effectiveMinTimeMs, samples, options.progress);
235
- }
236
- await autoSample(harness, benchmarks, samples, config.sampling.conditions, maxRelativeMargin, iterationOverrides, delayMs, effectiveMinTimeMs, options.progress, config.sampling.timeoutMs);
237
- const benchmarkResults = benchmarks.map((benchmark, index) => {
238
- const baselineSamples = samples[index].baseline;
239
- const currentSamples = samples[index].current;
227
+ const progressState = {
228
+ completed: 0,
229
+ total: remainingSamples * benchmarks.length
230
+ };
231
+ const benchmarkResults = new Array(benchmarks.length);
232
+ const iterationOverrides = benchmarks.map(() => undefined);
233
+ const benchmarkOrder = buildIndexOrder(benchmarks.length, 0);
234
+ const autoSampleDeadline = Date.now() + config.sampling.timeoutMs;
235
+ for (const index of benchmarkOrder) {
236
+ const benchmark = benchmarks[index];
237
+ const progress = options.progress;
238
+ const warmupSample = await warmupBenchmark(harness, benchmark, index, delayMs, progress ? event => progress({
239
+ ...event,
240
+ benchmarkCount: benchmarks.length
241
+ }) : undefined);
242
+ const iterationOverride = computeIterationOverrides([benchmark], [warmupSample], effectiveMinTimeMs)[0];
243
+ iterationOverrides[index] = iterationOverride;
244
+ const bucket = {
245
+ baseline: [warmupSample.baseline],
246
+ current: [warmupSample.current]
247
+ };
248
+ if (remainingSamples > 0) {
249
+ await collectSamplesForBenchmark(harness, benchmark, index, remainingSamples, iterationOverrides, delayMs, effectiveMinTimeMs, bucket, progress, progressState);
250
+ }
251
+ const remainingTimeoutMs = Math.max(0, autoSampleDeadline - Date.now());
252
+ if (remainingTimeoutMs > 0) {
253
+ await autoSampleForBenchmark(harness, benchmark, index, bucket, config.sampling.conditions, maxRelativeMargin, iterationOverrides, delayMs, effectiveMinTimeMs, progress, remainingTimeoutMs);
254
+ }
255
+ const baselineSamples = bucket.baseline;
256
+ const currentSamples = bucket.current;
240
257
  const baselineStats = summaryStats(baselineSamples);
241
258
  const currentStats = summaryStats(currentSamples);
242
259
  const difference = computeRelativeDifferenceFromSamples(baselineSamples, currentSamples);
243
- return {
260
+ benchmarkResults[index] = {
244
261
  benchmark,
245
262
  difference,
246
263
  samples: {
@@ -252,7 +269,7 @@ export const runEngineComparison = async options => {
252
269
  current: currentStats
253
270
  }
254
271
  };
255
- });
272
+ }
256
273
  return {
257
274
  benchmarks: benchmarkResults,
258
275
  engine
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "perfshield",
3
- "version": "0.0.7",
3
+ "version": "0.0.9",
4
4
  "description": "A tool for doing web benchmarking across multiple JS engines and with statistical significance",
5
5
  "license": "MIT",
6
6
  "type": "module",