perfshield 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -11,7 +11,7 @@ runtime JS engines (Node/V8 today).
11
11
  - Saves a baseline bundle (`prepare`).
12
12
  - Builds the current bundle and compares it to the baseline (`compare`).
13
13
  - Reports results in console and/or JSON.
14
- - Exits with code 1 when a regression is detected (CI excludes 0 in the slower direction).
14
+ - Exits with code 1 when a regression is detected (both relative and absolute CIs exclude 0 in the slower direction).
15
15
 
16
16
  ## Requirements
17
17
 
@@ -77,6 +77,15 @@ perfshield prepare --config perfshield.config.json
77
77
  perfshield compare --config perfshield.config.json
78
78
  ```
79
79
 
80
+ 4. (Optional) Calibrate sampling defaults based on the prepared baseline:
81
+
82
+ ```
83
+ perfshield calibrate --config perfshield.config.json
84
+ ```
85
+
86
+ This prints a JSON snippet with recommended `sampling` values you can paste into
87
+ your config.
88
+
80
89
  ## Benchmark bundle contract
81
90
 
82
91
  The build output must be a single ESM file that exports:
@@ -115,7 +124,7 @@ Supported formats:
115
124
  - `console`: human‑readable summary.
116
125
  - `json`: machine‑readable report.
117
126
 
118
- If any benchmark shows a regression (CI excludes 0 in the slower direction),
127
+ If any benchmark shows a regression (both relative and absolute CIs exclude 0 in the slower direction),
119
128
  the process exits with code 1.
120
129
 
121
130
  ## Examples
@@ -0,0 +1,145 @@
1
+ import { resolve } from "node:path";
2
+ import jstat from "jstat";
3
+ import { createNodeHarness } from "./engines/node.js";
4
+ import { buildHarnessIfNeeded, getHarnessPath } from "./harness.js";
5
+ import { summaryStats } from "./stats.js";
6
// Number of timed samples collected per benchmark during calibration.
const calibrationSampleCount = 20;
// Minimum duration (ms) targeted for each individual calibration sample.
const calibrationMinSampleMs = 2;
// Per-sample duration (ms) below which a non-zero minTimeMs is recommended.
const targetMinSampleMs = 5;
// Lower bound on the recommended sample count.
const minRecommendedSamples = 5;
// Lower bound on the recommended run timeout (ms).
const minTimeoutMs = 1000;
// Multiplier applied to the estimated total run time when deriving the timeout.
const timeoutSafetyFactor = 1.5;
// Confidence level used for the Student-t margin-of-error calculation.
const confidenceLevel = 0.95;
// Fallback relative CI margin when config.sampling.maxRelativeMargin is unset.
const defaultMaxRelativeMargin = 0.05;
14
/**
 * Scale a benchmark's iteration count so that one sample lasts at least
 * `targetMs`, based on the duration observed for `iterationsBase` iterations
 * (`warmupMs`). Never recommends fewer iterations than `iterationsBase`, and
 * falls back to it whenever a per-iteration cost cannot be derived.
 */
const computeIterationsForTarget = (iterationsBase, warmupMs, targetMs) => {
  if (targetMs <= 0) {
    return iterationsBase;
  }
  const msPerIteration = warmupMs / iterationsBase;
  const usable = Number.isFinite(msPerIteration) && msPerIteration > 0;
  if (!usable) {
    return iterationsBase;
  }
  const needed = Math.ceil(targetMs / msPerIteration);
  return needed > iterationsBase ? needed : iterationsBase;
};
24
/**
 * Run each benchmark once at its configured iteration count against the
 * baseline bundle and return the observed durations (ms) in benchmark order.
 * Runs are sequential on purpose: concurrent samples would contend for CPU.
 */
const collectWarmups = async (harness, benchmarks) => {
  const durations = [];
  let index = 0;
  for (const descriptor of benchmarks) {
    const sample = await harness.runSample({
      index,
      iterations: descriptor.iterations,
      version: "baseline"
    });
    durations.push(sample.durationMs);
    index += 1;
  }
  return durations;
};
37
/**
 * Collect timed samples for every benchmark, interleaving the benchmarks
 * within each round so that clock drift and thermal effects are spread
 * evenly across them.
 *
 * @param harness             engine harness providing runSample().
 * @param benchmarks          benchmark descriptors (used only for count here).
 * @param iterationOverrides  per-benchmark iteration counts to run.
 * @param sampleCount         number of rounds to collect; defaults to the
 *                            module-wide calibrationSampleCount. Exposed as a
 *                            parameter so callers/tests can shorten the run.
 * @returns array (per benchmark) of arrays of sample durations (ms).
 */
const collectCalibrationSamples = async (harness, benchmarks, iterationOverrides, sampleCount = calibrationSampleCount) => {
  const samples = benchmarks.map(() => []);
  for (let round = 0; round < sampleCount; round += 1) {
    for (let index = 0; index < benchmarks.length; index += 1) {
      const result = await harness.runSample({
        index,
        iterations: iterationOverrides[index],
        version: "baseline"
      });
      samples[index].push(result.durationMs);
    }
  }
  return samples;
};
51
/**
 * Estimate how many samples are needed so that the relative margin of error
 * stays within `targetMargin`, given the observed relative standard deviation
 * and a Student-t critical value. Falls back to minRecommendedSamples when
 * the inputs cannot support an estimate, and never recommends fewer.
 */
const computeRequiredSamples = (stats, targetMargin, tValue) => {
  const rsd = stats.relativeStandardDeviation;
  const marginInvalid = targetMargin <= 0;
  const rsdInvalid = !Number.isFinite(rsd) || rsd <= 0;
  if (marginInvalid || rsdInvalid) {
    return minRecommendedSamples;
  }
  const estimate = Math.ceil(((tValue * rsd) / targetMargin) ** 2);
  if (!Number.isFinite(estimate) || estimate <= 0) {
    return minRecommendedSamples;
  }
  return Math.max(minRecommendedSamples, estimate);
};
65
/**
 * Recommend a per-sample minimum time (ms). When the fastest finite warmup
 * already meets targetMinSampleMs no floor is needed (0); otherwise —
 * including when no finite warmups exist — recommend targetMinSampleMs.
 */
const computeMinTimeMs = warmups => {
  const finite = warmups.filter(Number.isFinite);
  // Short-circuit guards the Math.min(...[]) === Infinity case.
  if (finite.length === 0 || Math.min(...finite) < targetMinSampleMs) {
    return targetMinSampleMs;
  }
  return 0;
};
76
/**
 * Derive a run timeout (ms): estimate the total time to collect `minSamples`
 * rounds (each benchmark's sample floored at minTimeMs) plus inter-sample
 * delays, apply timeoutSafetyFactor, and clamp to at least minTimeoutMs.
 */
const computeTimeoutMs = (warmups, minSamples, minTimeMs, delayMs) => {
  const perRound = warmups.reduce((total, warmup) => {
    const base = Number.isFinite(warmup) && warmup > 0 ? warmup : minTimeMs;
    return total + Math.max(base, minTimeMs);
  }, 0);
  const rounds = Math.max(1, minSamples);
  const totalSamples = rounds * warmups.length;
  const estimated = perRound * rounds + delayMs * Math.max(0, totalSamples - 1);
  const timeout = Math.ceil(estimated * timeoutSafetyFactor);
  const valid = Number.isFinite(timeout) && timeout > 0;
  return valid ? Math.max(minTimeoutMs, timeout) : minTimeoutMs;
};
91
// Calibrate sampling parameters for one engine by measuring the baseline
// bundle: warm up each benchmark, collect timed samples, and derive the
// sample count, per-sample minimum time, and timeout needed to hit the
// requested relative margin. The harness is always closed, even on error.
const calibrateEngine = async (engine, baselinePath, maxRelativeMargin, delayMs, harnessPath) => {
  const resolvedBaseline = resolve(baselinePath);
  // The baseline path is passed for both bundle slots: calibration only
  // measures the baseline build, it compares nothing.
  const harness = await createNodeHarness(engine, harnessPath, resolvedBaseline, resolvedBaseline);
  try {
    const benchmarks = await harness.listBenchmarks();
    // One warmup run per benchmark; reused below as the per-round time estimate.
    const warmups = await collectWarmups(harness, benchmarks);
    // Scale iteration counts so each calibration sample lasts at least
    // calibrationMinSampleMs.
    const iterationOverrides = benchmarks.map((benchmark, index) => computeIterationsForTarget(benchmark.iterations ?? 1, warmups[index], calibrationMinSampleMs));
    const samples = await collectCalibrationSamples(harness, benchmarks, iterationOverrides);
    const benchmarkStats = benchmarks.map((benchmark, index) => ({
      benchmark,
      stats: summaryStats(samples[index]),
      warmupMs: warmups[index]
    }));
    // Two-sided Student-t critical value at confidenceLevel with
    // (calibrationSampleCount - 1) degrees of freedom.
    const tValue = jstat.studentt.inv(1 - (1 - confidenceLevel) / 2, Math.max(1, calibrationSampleCount - 1));
    // The noisiest benchmark dictates the sample count for the whole engine.
    const minSamples = benchmarkStats.reduce((max, entry) => {
      const required = computeRequiredSamples(entry.stats, maxRelativeMargin, tValue);
      return Math.max(max, required);
    }, minRecommendedSamples);
    const minTimeMs = computeMinTimeMs(warmups);
    const timeoutMs = computeTimeoutMs(warmups, minSamples, minTimeMs, delayMs);
    return {
      engine,
      minSamples,
      minTimeMs,
      timeoutMs
    };
  } finally {
    await harness.close();
  }
};
121
/**
 * Calibrate recommended sampling settings across every configured engine.
 * Runs each engine against the prepared baseline and returns the most
 * conservative (maximum) minSamples/minTimeMs/timeoutMs over all engines,
 * together with the effective maxRelativeMargin and configured conditions.
 * The harness build artifact is cleaned up even when calibration fails.
 */
export const calibrateSampling = async (config, baselinePath) => {
  const { sampling } = config;
  const maxRelativeMargin = sampling.maxRelativeMargin ?? defaultMaxRelativeMargin;
  const delayMs = sampling.delayMs ?? 0;
  const harnessArtifact = await buildHarnessIfNeeded(getHarnessPath());
  try {
    // Engines are calibrated one at a time on purpose: parallel runs would
    // contend for CPU and skew the timing measurements.
    const perEngine = [];
    for (const engine of config.engines) {
      const result = await calibrateEngine(engine, baselinePath, maxRelativeMargin, delayMs, harnessArtifact.path);
      perEngine.push(result);
    }
    let minSamples = minRecommendedSamples;
    let minTimeMs = 0;
    let timeoutMs = minTimeoutMs;
    for (const entry of perEngine) {
      minSamples = Math.max(minSamples, entry.minSamples);
      minTimeMs = Math.max(minTimeMs, entry.minTimeMs);
      timeoutMs = Math.max(timeoutMs, entry.timeoutMs);
    }
    return {
      conditions: sampling.conditions,
      maxRelativeMargin,
      minSamples,
      minTimeMs,
      timeoutMs
    };
  } finally {
    if (harnessArtifact.cleanup) {
      await harnessArtifact.cleanup();
    }
  }
};
package/lib/cli.js CHANGED
@@ -1,13 +1,14 @@
1
1
  #!/usr/bin/env node
2
2
  import { WriteStream } from "node:tty";
3
3
  import { ensureBaseline, saveBaseline } from "./artifacts.js";
4
+ import { calibrateSampling } from "./calibrate.js";
4
5
  import { runBuild } from "./build.js";
5
6
  import { ConfigError, formatConfigError, loadConfig } from "./config.js";
6
7
  import { getRegressions } from "./regression.js";
7
8
  import { renderReports } from "./report/index.js";
8
9
  import { runEngineComparison } from "./runner.js";
9
10
  const usage = () => {
10
- console.error("Usage: perfshield <prepare|compare> [--config path]");
11
+ console.error("Usage: perfshield <prepare|compare|calibrate> [--config path]");
11
12
  };
12
13
  const getFlagValue = (args, flag) => {
13
14
  const index = args.indexOf(flag);
@@ -99,10 +100,17 @@ const runCompare = async config => {
99
100
  process.exitCode = 1;
100
101
  }
101
102
  };
103
// Prepare (or reuse) the baseline, calibrate sampling against it, and print
// the recommended `sampling` config as pretty-printed JSON for pasting.
const runCalibrate = async config => {
  const baselinePath = await ensureBaseline(config);
  const sampling = await calibrateSampling(config, baselinePath);
  const report = JSON.stringify({ sampling }, null, 2);
  console.log(report);
};
102
110
  const main = async () => {
103
111
  const args = process.argv.slice(2);
104
112
  const command = args[0];
105
- if (command !== "prepare" && command !== "compare") {
113
+ if (command !== "prepare" && command !== "compare" && command !== "calibrate") {
106
114
  usage();
107
115
  process.exitCode = 1;
108
116
  return;
@@ -123,6 +131,10 @@ const main = async () => {
123
131
  await runPrepare(config);
124
132
  return;
125
133
  }
134
+ if (command === "calibrate") {
135
+ await runCalibrate(config);
136
+ return;
137
+ }
126
138
  await runCompare(config);
127
139
  };
128
140
  main().catch(error => {
package/lib/harness.js ADDED
@@ -0,0 +1,51 @@
1
+ import { access, mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
2
+ import { tmpdir } from "node:os";
3
+ import { join, resolve } from "node:path";
4
+ import { fileURLToPath } from "node:url";
5
+ import { transformFileAsync } from "@babel/core";
6
+ const harnessTempPrefix = "perfshield-harness-";
7
+ const hasBabelConfig = async () => {
8
+ const configPath = resolve(process.cwd(), "babel.config.cjs");
9
+ try {
10
+ await access(configPath);
11
+ return true;
12
+ } catch {
13
+ return false;
14
+ }
15
+ };
16
+ export const getHarnessPath = () => {
17
+ const override = process.env.WEB_BENCHMARKER_HARNESS_PATH;
18
+ if (override != null) {
19
+ return override;
20
+ }
21
+ return fileURLToPath(new URL("./engines/node-harness.js", import.meta.url).toString());
22
+ };
23
+ export const buildHarnessIfNeeded = async sourcePath => {
24
+ const contents = await readFile(sourcePath, "utf8");
25
+ if (!contents.includes("import type") && !contents.includes("@flow")) {
26
+ return {
27
+ cleanup: null,
28
+ path: sourcePath
29
+ };
30
+ }
31
+ const usesConfig = await hasBabelConfig();
32
+ const result = await transformFileAsync(sourcePath, {
33
+ configFile: usesConfig ? resolve(process.cwd(), "babel.config.cjs") : false,
34
+ presets: usesConfig ? [] : ["@babel/preset-flow"]
35
+ });
36
+ if (!result || !result.code) {
37
+ throw new Error("Failed to compile node harness.");
38
+ }
39
+ const dir = await mkdtemp(join(tmpdir(), harnessTempPrefix));
40
+ const harnessPath = join(dir, "node-harness.js");
41
+ await writeFile(harnessPath, result.code, "utf8");
42
+ return {
43
+ cleanup: async () => {
44
+ await rm(dir, {
45
+ force: true,
46
+ recursive: true
47
+ });
48
+ },
49
+ path: harnessPath
50
+ };
51
+ };
package/lib/regression.js CHANGED
@@ -3,7 +3,7 @@ export const getRegressions = results => {
3
3
  const findings = [];
4
4
  for (const result of results) {
5
5
  for (const entry of result.benchmarks) {
6
- if (isPositiveInterval(entry.difference.relative.ci)) {
6
+ if (isPositiveInterval(entry.difference.relative.ci) && isPositiveInterval(entry.difference.absolute.ci)) {
7
7
  findings.push({
8
8
  benchmark: entry.benchmark.name,
9
9
  engine: result.engine.name,
@@ -10,11 +10,13 @@ const formatRelativeInterval = (interval, decimals) => formatInterval({
10
10
  low: interval.low * 100
11
11
  }, decimals, "%");
12
12
  const formatRelativeValue = (value, decimals) => `${formatNumber(value * 100, decimals)}%`;
13
- const classifyDifference = interval => {
14
- if (interval.low > 0 && interval.high > 0) {
13
// A confidence interval is strictly positive when both bounds exceed zero.
const isPositiveInterval = ({ low, high }) => low > 0 && high > 0;
14
// A confidence interval is strictly negative when both bounds are below zero.
const isNegativeInterval = ({ low, high }) => low < 0 && high < 0;
15
+ const classifyDifference = difference => {
16
+ if (isPositiveInterval(difference.relative.ci) && isPositiveInterval(difference.absolute.ci)) {
15
17
  return "regression";
16
18
  }
17
- if (interval.low < 0 && interval.high < 0) {
19
+ if (isNegativeInterval(difference.relative.ci) && isNegativeInterval(difference.absolute.ci)) {
18
20
  return "improvement";
19
21
  }
20
22
  return "no significant change";
@@ -28,7 +30,7 @@ export const renderConsoleReport = results => {
28
30
  lines.push(`Engine: ${result.engine.name}`);
29
31
  for (const entry of result.benchmarks) {
30
32
  const unit = entry.benchmark.unit != null ? ` ${entry.benchmark.unit}` : "";
31
- const status = classifyDifference(entry.difference.relative.ci);
33
+ const status = classifyDifference(entry.difference);
32
34
  if (status === "regression") {
33
35
  regressions += 1;
34
36
  } else if (status === "improvement") {
@@ -36,7 +38,7 @@ export const renderConsoleReport = results => {
36
38
  } else {
37
39
  neutral += 1;
38
40
  }
39
- const benchmarkLines = [` Benchmark: ${entry.benchmark.name}`, ` Result: ${status} (mean=${formatRelativeValue(entry.difference.relative.mean, 2)} ci=${formatRelativeInterval(entry.difference.relative.ci, 2)})`, ` baseline mean=${formatNumber(entry.stats.baseline.mean, 4)}${unit} ci=${formatInterval(entry.stats.baseline.meanCI, 4)} sd=${formatNumber(entry.stats.baseline.standardDeviation, 4)}`, ` current mean=${formatNumber(entry.stats.current.mean, 4)}${unit} ci=${formatInterval(entry.stats.current.meanCI, 4)} sd=${formatNumber(entry.stats.current.standardDeviation, 4)}`, ` diff rel mean=${formatRelativeValue(entry.difference.relative.mean, 2)} ci=${formatRelativeInterval(entry.difference.relative.ci, 2)}`];
41
+ const benchmarkLines = [` Benchmark: ${entry.benchmark.name}`, ` Result: ${status} (mean=${formatRelativeValue(entry.difference.relative.mean, 2)} ci=${formatRelativeInterval(entry.difference.relative.ci, 2)})`, ` baseline mean=${formatNumber(entry.stats.baseline.mean, 4)}${unit} ci=${formatInterval(entry.stats.baseline.meanCI, 4)} sd=${formatNumber(entry.stats.baseline.standardDeviation, 4)}`, ` current mean=${formatNumber(entry.stats.current.mean, 4)}${unit} ci=${formatInterval(entry.stats.current.meanCI, 4)} sd=${formatNumber(entry.stats.current.standardDeviation, 4)}`, ` diff rel mean=${formatRelativeValue(entry.difference.relative.mean, 2)} ci=${formatRelativeInterval(entry.difference.relative.ci, 2)}`, ` diff abs mean=${formatNumber(entry.difference.absolute.mean, 4)}${unit} ci=${formatInterval(entry.difference.absolute.ci, 4)}`];
40
42
  lines.push(...benchmarkLines);
41
43
  }
42
44
  lines.push(` Summary: regressions=${regressions} improvements=${improvements} neutral=${neutral}`, "");
package/lib/runner.js CHANGED
@@ -1,13 +1,9 @@
1
- import { access, mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
2
- import { tmpdir } from "node:os";
3
- import { join, resolve } from "node:path";
4
- import { fileURLToPath } from "node:url";
5
- import { transformFileAsync } from "@babel/core";
1
+ import { resolve } from "node:path";
6
2
  import { createNodeHarness } from "./engines/node.js";
3
+ import { buildHarnessIfNeeded, getHarnessPath } from "./harness.js";
7
4
  import { computeRelativeDifferenceFromSamples, summaryStats } from "./stats.js";
8
5
  const versions = ["baseline", "current"];
9
6
  const autoSampleBatchSize = 10;
10
- const harnessTempPrefix = "perfshield-harness-";
11
7
  const defaultMinTimeMs = 20;
12
8
  const defaultMaxRelativeMargin = 0.05;
13
9
  const getVersionOrder = seed => {
@@ -16,51 +12,6 @@ const getVersionOrder = seed => {
16
12
  }
17
13
  return [versions[1], versions[0]];
18
14
  };
19
- const getHarnessPath = () => {
20
- const override = process.env.WEB_BENCHMARKER_HARNESS_PATH;
21
- if (override != null) {
22
- return override;
23
- }
24
- return fileURLToPath(new URL("./engines/node-harness.js", import.meta.url).toString());
25
- };
26
- const hasBabelConfig = async () => {
27
- const configPath = resolve(process.cwd(), "babel.config.cjs");
28
- try {
29
- await access(configPath);
30
- return true;
31
- } catch {
32
- return false;
33
- }
34
- };
35
- const buildHarnessIfNeeded = async sourcePath => {
36
- const contents = await readFile(sourcePath, "utf8");
37
- if (!contents.includes("import type") && !contents.includes("@flow")) {
38
- return {
39
- cleanup: null,
40
- path: sourcePath
41
- };
42
- }
43
- const usesConfig = await hasBabelConfig();
44
- const result = await transformFileAsync(sourcePath, {
45
- configFile: usesConfig ? resolve(process.cwd(), "babel.config.cjs") : false,
46
- presets: usesConfig ? [] : ["@babel/preset-flow"]
47
- });
48
- if (!result || !result.code) {
49
- throw new Error("Failed to compile node harness.");
50
- }
51
- const dir = await mkdtemp(join(tmpdir(), harnessTempPrefix));
52
- const harnessPath = join(dir, "node-harness.js");
53
- await writeFile(harnessPath, result.code, "utf8");
54
- return {
55
- cleanup: async () => {
56
- await rm(dir, {
57
- force: true,
58
- recursive: true
59
- });
60
- },
61
- path: harnessPath
62
- };
63
- };
64
15
  const sleep = async delayMs => {
65
16
  if (delayMs <= 0) {
66
17
  return;
package/lib/stats.js CHANGED
@@ -34,6 +34,10 @@ export const samplingDistributionOfRelativeDifferenceOfMeans = (a, b) => ({
34
34
  mean: (b.mean - a.mean) / a.mean,
35
35
  variance: (a.variance * b.mean * b.mean + b.variance * a.mean * a.mean) / (a.mean * a.mean * a.mean * a.mean)
36
36
  });
37
/**
 * Sampling distribution of the absolute difference of two independent means
 * (b - a): the means subtract, the variances add.
 */
export const samplingDistributionOfDifferenceOfMeans = (a, b) => {
  const mean = b.mean - a.mean;
  const variance = a.variance + b.variance;
  return { mean, variance };
};
37
41
  export const summaryStats = values => {
38
42
  if (values.length === 0) {
39
43
  throw new Error("Cannot compute stats for an empty sample set.");
@@ -68,9 +72,14 @@ export const computeDifference = (baseline, current) => {
68
72
  mean: current.mean,
69
73
  variance: current.variance
70
74
  }, current.size);
75
+ const absoluteDist = samplingDistributionOfDifferenceOfMeans(baselineDist, currentDist);
71
76
  const relativeDist = samplingDistributionOfRelativeDifferenceOfMeans(baselineDist, currentDist);
72
77
  const size = Math.min(baseline.size, current.size);
73
78
  return {
79
+ absolute: {
80
+ ci: confidenceInterval95(absoluteDist, size),
81
+ mean: absoluteDist.mean
82
+ },
74
83
  relative: {
75
84
  ci: confidenceInterval95(relativeDist, size),
76
85
  mean: relativeDist.mean
@@ -99,7 +108,26 @@ const computePairedRelativeStats = (baselineSamples, currentSamples) => {
99
108
  mean: diffStats.mean
100
109
  };
101
110
  };
111
/**
 * Paired absolute-difference statistics: per-index (current - baseline)
 * deltas, summarized into a mean and its 95% confidence interval. Extra
 * samples on the longer side are ignored; empty inputs throw.
 */
const computePairedAbsoluteStats = (baselineSamples, currentSamples) => {
  const size = Math.min(baselineSamples.length, currentSamples.length);
  if (size === 0) {
    throw new Error("Cannot compute differences with empty sample sets.");
  }
  const deltas = Array.from({ length: size }, (_, i) => currentSamples[i] - baselineSamples[i]);
  const diffStats = summaryStats(deltas);
  const distribution = samplingDistributionOfTheMean({
    mean: diffStats.mean,
    variance: diffStats.variance
  }, diffStats.size);
  return {
    ci: confidenceInterval95(distribution, diffStats.size),
    mean: diffStats.mean
  };
};
102
129
  export const computeRelativeDifferenceFromSamples = (baselineSamples, currentSamples) => ({
130
+ absolute: computePairedAbsoluteStats(baselineSamples, currentSamples),
103
131
  relative: computePairedRelativeStats(baselineSamples, currentSamples)
104
132
  });
105
133
  export const computeDifferences = stats => stats.map(result => ({
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "perfshield",
3
- "version": "0.0.5",
3
+ "version": "0.0.7",
4
4
  "description": "A tool for doing web benchmarking across multiple JS engines and with statistical signifigance",
5
5
  "license": "MIT",
6
6
  "type": "module",