perfshield 0.0.2 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -62,10 +62,7 @@ export const benchmarks = [
   "sampling": {
     "minSamples": 30,
     "timeoutMs": 10000,
-    "conditions": {
-      "absolute": [0],
-      "relative": [0]
-    }
+    "conditions": [0]
   },
   "report": {
     "formats": ["console", "json"]
@@ -13,10 +13,7 @@
   "sampling": {
     "minSamples": 30,
     "timeoutMs": 10000,
-    "conditions": {
-      "absolute": [0],
-      "relative": [0]
-    }
+    "conditions": [0]
   },
   "report": {
     "formats": ["console", "json"]
package/lib/config.js CHANGED
@@ -158,26 +158,6 @@ const parseEngineConfig = (value, index, issues) => {
     name
   };
 };
-const parseSamplingConditions = (value, issues) => {
-  const conditions = asObject(value, "config.sampling.conditions", issues);
-  if (!conditions) {
-    return null;
-  }
-  validateKeys(conditions, ["absolute", "relative"], "config.sampling.conditions", issues);
-  const absolute = asNumberArray(conditions.absolute, "config.sampling.conditions.absolute", issues, {
-    minLength: 1
-  });
-  const relative = asNumberArray(conditions.relative, "config.sampling.conditions.relative", issues, {
-    minLength: 1
-  });
-  if (!absolute || !relative) {
-    return null;
-  }
-  return {
-    absolute,
-    relative
-  };
-};
 const parseSamplingConfig = (value, issues) => {
   const sampling = asObject(value, "config.sampling", issues);
   if (!sampling) {
@@ -192,7 +172,9 @@ const parseSamplingConfig = (value, issues) => {
     integer: true,
     min: 1
   });
-  const conditions = parseSamplingConditions(sampling.conditions, issues);
+  const conditions = asNumberArray(sampling.conditions, "config.sampling.conditions", issues, {
+    minLength: 1
+  });
   if (minSamples == null || timeoutMs == null || !conditions) {
     return null;
   }
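
`asNumberArray` is an existing helper in this package; only its call signature appears in the diff. For readers following along, a plausible sketch of what such a validator does, inferred from its call sites (the error-reporting shape of `issues` is an assumption):

    // Hypothetical reconstruction, not the package's actual implementation.
    // Inferred from calls like:
    //   asNumberArray(sampling.conditions, "config.sampling.conditions", issues, { minLength: 1 })
    const asNumberArray = (value, path, issues, { minLength = 0 } = {}) => {
      if (!Array.isArray(value) || value.some(item => typeof item !== "number" || Number.isNaN(item))) {
        issues.push(`${path} must be an array of numbers`); // assumed: issues collects messages
        return null;
      }
      if (value.length < minLength) {
        issues.push(`${path} must have at least ${minLength} entry(ies)`);
        return null;
      }
      return value;
    };

With `parseSamplingConditions` removed, a 0.0.2-style `conditions: { absolute, relative }` object no longer passes validation, so this is a breaking config change for existing users.
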
package/lib/regression.js CHANGED
@@ -3,9 +3,8 @@ export const getRegressions = results => {
   const findings = [];
   for (const result of results) {
     for (const entry of result.benchmarks) {
-      if (isPositiveInterval(entry.difference.absolute.ci) || isPositiveInterval(entry.difference.relative.ci)) {
+      if (isPositiveInterval(entry.difference.relative.ci)) {
         findings.push({
-          absolute: entry.difference.absolute.ci,
           benchmark: entry.benchmark.name,
           engine: result.engine.name,
           relative: entry.difference.relative.ci
@@ -16,7 +16,7 @@ export const renderConsoleReport = results => {
     lines.push(`Engine: ${result.engine.name}`);
     for (const entry of result.benchmarks) {
       const unit = entry.benchmark.unit != null ? ` ${entry.benchmark.unit}` : "";
-      const benchmarkLines = [` Benchmark: ${entry.benchmark.name}`, ` baseline mean=${formatNumber(entry.stats.baseline.mean, 4)}${unit} ci=${formatInterval(entry.stats.baseline.meanCI, 4)} sd=${formatNumber(entry.stats.baseline.standardDeviation, 4)}`, ` current mean=${formatNumber(entry.stats.current.mean, 4)}${unit} ci=${formatInterval(entry.stats.current.meanCI, 4)} sd=${formatNumber(entry.stats.current.standardDeviation, 4)}`, ` diff abs mean=${formatNumber(entry.difference.absolute.mean, 4)}${unit} ci=${formatInterval(entry.difference.absolute.ci, 4)}`, ` diff rel mean=${formatRelativeValue(entry.difference.relative.mean, 2)} ci=${formatRelativeInterval(entry.difference.relative.ci, 2)}`];
+      const benchmarkLines = [` Benchmark: ${entry.benchmark.name}`, ` baseline mean=${formatNumber(entry.stats.baseline.mean, 4)}${unit} ci=${formatInterval(entry.stats.baseline.meanCI, 4)} sd=${formatNumber(entry.stats.baseline.standardDeviation, 4)}`, ` current mean=${formatNumber(entry.stats.current.mean, 4)}${unit} ci=${formatInterval(entry.stats.current.meanCI, 4)} sd=${formatNumber(entry.stats.current.standardDeviation, 4)}`, ` diff rel mean=${formatRelativeValue(entry.difference.relative.mean, 2)} ci=${formatRelativeInterval(entry.difference.relative.ci, 2)}`];
       lines.push(...benchmarkLines);
     }
     lines.push("");
package/lib/runner.js CHANGED
@@ -59,19 +59,39 @@ const buildHarnessIfNeeded = async sourcePath => {
     path: harnessPath
   };
 };
-const warmupBenchmarks = async (harness, benchmarks) => {
+const withFreshHarness = async (engine, harnessPath, baselinePath, currentPath, callback) => {
+  const harness = await createNodeHarness(engine, harnessPath, baselinePath, currentPath);
+  try {
+    return await callback(harness);
+  } finally {
+    await harness.close();
+  }
+};
+const runIterationInFreshHarness = async (engine, harnessPath, baselinePath, currentPath, index, iterations, order) => await withFreshHarness(engine, harnessPath, baselinePath, currentPath, async harness => {
+  const results = {};
+  for (const version of order) {
+    const payload = {
+      index,
+      version
+    };
+    if (iterations != null) {
+      payload.iterations = iterations;
+    }
+    const result = await harness.runSample(payload);
+    results[version] = result.durationMs;
+  }
+  return results;
+});
+const warmupBenchmarks = async (engine, harnessPath, baselinePath, currentPath, benchmarks) => {
+  let roundRobinSeed = 0;
   for (let index = 0; index < benchmarks.length; index += 1) {
     const descriptor = benchmarks[index];
-    for (const version of versions) {
-      await harness.runSample({
-        index,
-        iterations: descriptor.iterations,
-        version
-      });
-    }
+    const order = getVersionOrder(roundRobinSeed);
+    roundRobinSeed += 1;
+    await runIterationInFreshHarness(engine, harnessPath, baselinePath, currentPath, index, descriptor.iterations, order);
   }
 };
-const collectSamples = async (harness, benchmarks, minSamples) => {
+const collectSamples = async (engine, harnessPath, baselinePath, currentPath, benchmarks, minSamples) => {
   const samples = benchmarks.map(() => ({
     baseline: [],
     current: []
@@ -82,17 +102,12 @@ const collectSamples = async (harness, benchmarks, minSamples) => {
       const descriptor = benchmarks[index];
       const order = getVersionOrder(roundRobinSeed);
       roundRobinSeed += 1;
-      for (const version of order) {
-        const result = await harness.runSample({
-          index,
-          iterations: descriptor.iterations,
-          version
-        });
-        if (version === "baseline") {
-          samples[index].baseline.push(result.durationMs);
-        } else {
-          samples[index].current.push(result.durationMs);
-        }
+      const result = await runIterationInFreshHarness(engine, harnessPath, baselinePath, currentPath, index, descriptor.iterations, order);
+      if (result.baseline != null) {
+        samples[index].baseline.push(result.baseline);
+      }
+      if (result.current != null) {
+        samples[index].current.push(result.current);
       }
     }
   }
@@ -103,19 +118,14 @@ const autoSampleResolved = (samples, conditions) => samples.every(bucket => {
   const baselineStats = summaryStats(bucket.baseline);
   const currentStats = summaryStats(bucket.current);
   const diff = computeDifference(baselineStats, currentStats);
-  for (const condition of conditions.absolute) {
-    if (intervalContains(diff.absolute.ci, condition)) {
-      return false;
-    }
-  }
-  for (const condition of conditions.relative) {
+  for (const condition of conditions) {
     if (intervalContains(diff.relative.ci, condition)) {
       return false;
     }
   }
   return true;
 });
-const autoSample = async (harness, benchmarks, samples, conditions, timeoutMs) => {
+const autoSample = async (engine, harnessPath, baselinePath, currentPath, benchmarks, samples, conditions, timeoutMs) => {
   const startTime = Date.now();
   let roundRobinSeed = 0;
   while (Date.now() - startTime < timeoutMs) {
@@ -127,17 +137,12 @@ const autoSample = async (harness, benchmarks, samples, conditions, timeoutMs) =
       const descriptor = benchmarks[index];
       const order = getVersionOrder(roundRobinSeed);
       roundRobinSeed += 1;
-      for (const version of order) {
-        const result = await harness.runSample({
-          index,
-          iterations: descriptor.iterations,
-          version
-        });
-        if (version === "baseline") {
-          samples[index].baseline.push(result.durationMs);
-        } else {
-          samples[index].current.push(result.durationMs);
-        }
+      const result = await runIterationInFreshHarness(engine, harnessPath, baselinePath, currentPath, index, descriptor.iterations, order);
+      if (result.baseline != null) {
+        samples[index].baseline.push(result.baseline);
+      }
+      if (result.current != null) {
+        samples[index].current.push(result.current);
       }
     }
   }
@@ -151,12 +156,13 @@ export const runEngineComparison = async options => {
     engine
   } = options;
   const harnessArtifact = await buildHarnessIfNeeded(getHarnessPath());
-  const harness = await createNodeHarness(engine, harnessArtifact.path, resolve(baselinePath), resolve(currentPath));
+  const resolvedBaseline = resolve(baselinePath);
+  const resolvedCurrent = resolve(currentPath);
   try {
-    const benchmarks = await harness.listBenchmarks();
-    await warmupBenchmarks(harness, benchmarks);
-    const samples = await collectSamples(harness, benchmarks, config.sampling.minSamples);
-    await autoSample(harness, benchmarks, samples, config.sampling.conditions, config.sampling.timeoutMs);
+    const benchmarks = await withFreshHarness(engine, harnessArtifact.path, resolvedBaseline, resolvedCurrent, async harness => await harness.listBenchmarks());
+    await warmupBenchmarks(engine, harnessArtifact.path, resolvedBaseline, resolvedCurrent, benchmarks);
+    const samples = await collectSamples(engine, harnessArtifact.path, resolvedBaseline, resolvedCurrent, benchmarks, config.sampling.minSamples);
+    await autoSample(engine, harnessArtifact.path, resolvedBaseline, resolvedCurrent, benchmarks, samples, config.sampling.conditions, config.sampling.timeoutMs);
     const benchmarkResults = benchmarks.map((benchmark, index) => {
       const baselineSamples = samples[index].baseline;
       const currentSamples = samples[index].current;
@@ -181,7 +187,6 @@ export const runEngineComparison = async options => {
       engine
     };
   } finally {
-    await harness.close();
     if (harnessArtifact.cleanup) {
       await harnessArtifact.cleanup();
     }
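
The structural change in runner.js: 0.0.2 reused one long-lived harness for every warmup and sample, while 0.0.4 runs each iteration inside its own short-lived harness (`withFreshHarness` creates it, `runIterationInFreshHarness` uses it, and the `finally` guarantees it is closed). That trades process-startup overhead for isolation: JIT state, GC pressure, and module caches from one sample cannot bleed into the next. `getVersionOrder` is referenced but not shown; given the per-iteration `roundRobinSeed`, it presumably alternates which version runs first so neither systematically benefits from running cold or warm. A sketch under that assumption:

    // Assumed behavior of getVersionOrder (not shown in the diff): alternate
    // baseline/current order per iteration to cancel run-order effects.
    const getVersionOrder = seed =>
      seed % 2 === 0 ? ["baseline", "current"] : ["current", "baseline"];

Note also that `runIterationInFreshHarness` only includes `iterations` in the payload when it is non-null, where the old code passed `descriptor.iterations` unconditionally.
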
package/lib/stats.js CHANGED
@@ -23,10 +23,6 @@ export const samplingDistributionOfTheMean = (distribution, sampleSize) => ({
   mean: distribution.mean,
   variance: distribution.variance / sampleSize
 });
-export const samplingDistributionOfAbsoluteDifferenceOfMeans = (a, b) => ({
-  mean: b.mean - a.mean,
-  variance: a.variance + b.variance
-});
 export const samplingDistributionOfRelativeDifferenceOfMeans = (a, b) => ({
   mean: (b.mean - a.mean) / a.mean,
   variance: (a.variance * b.mean * b.mean + b.variance * a.mean * a.mean) / (a.mean * a.mean * a.mean * a.mean)
@@ -65,14 +61,9 @@ export const computeDifference = (baseline, current) => {
     mean: current.mean,
     variance: current.variance
   }, current.size);
-  const absoluteDist = samplingDistributionOfAbsoluteDifferenceOfMeans(baselineDist, currentDist);
   const relativeDist = samplingDistributionOfRelativeDifferenceOfMeans(baselineDist, currentDist);
   const size = Math.min(baseline.size, current.size);
   return {
-    absolute: {
-      ci: confidenceInterval95(absoluteDist, size),
-      mean: absoluteDist.mean
-    },
     relative: {
       ci: confidenceInterval95(relativeDist, size),
       mean: relativeDist.mean
@@ -92,12 +83,7 @@ export const autoSampleConditionsResolved = (resultStats, conditions) => {
     if (diff == null) {
       continue;
     }
-    for (const condition of conditions.absolute) {
-      if (intervalContains(diff.absolute.ci, condition)) {
-        return false;
-      }
-    }
-    for (const condition of conditions.relative) {
+    for (const condition of conditions) {
       if (intervalContains(diff.relative.ci, condition)) {
         return false;
       }
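
The variance kept in `samplingDistributionOfRelativeDifferenceOfMeans` is the standard first-order delta-method approximation for the relative difference of two independent means. Writing R = (B - A)/A = B/A - 1 and expanding around the means:

    % First-order (delta method) Taylor expansion, A and B independent
    \operatorname{Var}(R) \approx
      \left(\frac{\partial R}{\partial A}\right)^{2}\sigma_A^{2}
    + \left(\frac{\partial R}{\partial B}\right)^{2}\sigma_B^{2}
    = \frac{\mu_B^{2}}{\mu_A^{4}}\,\sigma_A^{2} + \frac{1}{\mu_A^{2}}\,\sigma_B^{2}
    = \frac{\sigma_A^{2}\mu_B^{2} + \sigma_B^{2}\mu_A^{2}}{\mu_A^{4}}

which is exactly the code's `(a.variance * b.mean * b.mean + b.variance * a.mean * a.mean) / (a.mean * a.mean * a.mean * a.mean)`. The deleted absolute counterpart was the exact identity Var(B - A) = σ_A² + σ_B² for independent means; 0.0.4 simply stops computing it.
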
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "perfshield",
-  "version": "0.0.2",
+  "version": "0.0.4",
   "description": "A tool for doing web benchmarking across multiple JS engines with statistical significance",
   "license": "MIT",
   "type": "module",