flexi-bench 0.0.0-alpha.4 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +24 -0
- package/dist/api-types.d.ts +25 -0
- package/dist/api-types.js +35 -0
- package/dist/benchmark-runner.js +5 -5
- package/dist/benchmark.d.ts +2 -1
- package/dist/benchmark.js +150 -141
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/performance-observer.js +7 -20
- package/dist/reporters/benchmark-console-reporter.js +14 -13
- package/dist/reporters/markdown-benchmark-reporter.js +18 -18
- package/dist/reporters/noop-reporter.js +1 -3
- package/dist/reporters/suite-console-reporter.js +10 -9
- package/dist/results.d.ts +10 -2
- package/dist/results.js +17 -5
- package/dist/shared-api.d.ts +3 -1
- package/dist/shared-api.js +10 -6
- package/dist/suite.d.ts +11 -1
- package/dist/suite.js +40 -24
- package/dist/variation.js +7 -3
- package/package.json +5 -2
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,27 @@
+## 0.1.0 (2024-07-28)
+
+
+### 🚀 Features
+
+- add different error strategies for handling failing actions ([#11](https://github.com/AgentEnder/flexi-bench/pull/11))
+- **repo:** add ability to publish only docs ([4cde432](https://github.com/AgentEnder/flexi-bench/commit/4cde432))
+
+### ❤️ Thank You
+
+- Craigory Coppola @AgentEnder
+
+# 0.0.0 (2024-07-28)
+
+
+### 🚀 Features
+
+- add different error strategies for handling failing actions ([#11](https://github.com/AgentEnder/flexi-bench/pull/11))
+- **repo:** add ability to publish only docs ([4cde432](https://github.com/AgentEnder/flexi-bench/commit/4cde432))
+
+### ❤️ Thank You
+
+- Craigory Coppola @AgentEnder
+
 ## 0.0.0-alpha.4 (2024-07-17)


package/dist/api-types.d.ts
CHANGED
@@ -21,3 +21,28 @@ export interface BenchmarkReporter {
 export interface SuiteReporter {
     report: (results: Record<string, Result[]>) => void;
 }
+/**
+ * The strategy to use when an error occurs during a benchmark run.
+ */
+export declare enum ErrorStrategy {
+    /**
+     * Continue running the benchmark. Errors will be collected and reported at the end. This is the default behavior.
+     */
+    Continue = "continue",
+    /**
+     * Abort the benchmark run immediately when an error occurs.
+     */
+    Abort = "abort",
+    /**
+     * Delay the error until the end of the benchmark run. This is useful when you want to see all the errors at once
+     */
+    DelayedThrow = "delayed-throw"
+}
+export declare class AggregateBenchmarkError extends Error {
+    results: Result[];
+    constructor(results: Result[]);
+}
+export declare class AggregateSuiteError extends Error {
+    results: Record<string, Result[]>;
+    constructor(results: Record<string, Result[]>);
+}
package/dist/api-types.js
CHANGED
@@ -1,2 +1,37 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
+exports.AggregateSuiteError = exports.AggregateBenchmarkError = exports.ErrorStrategy = void 0;
+/**
+ * The strategy to use when an error occurs during a benchmark run.
+ */
+var ErrorStrategy;
+(function (ErrorStrategy) {
+    /**
+     * Continue running the benchmark. Errors will be collected and reported at the end. This is the default behavior.
+     */
+    ErrorStrategy["Continue"] = "continue";
+    /**
+     * Abort the benchmark run immediately when an error occurs.
+     */
+    ErrorStrategy["Abort"] = "abort";
+    /**
+     * Delay the error until the end of the benchmark run. This is useful when you want to see all the errors at once
+     */
+    ErrorStrategy["DelayedThrow"] = "delayed-throw";
+})(ErrorStrategy || (exports.ErrorStrategy = ErrorStrategy = {}));
+class AggregateBenchmarkError extends Error {
+    results;
+    constructor(results) {
+        super('[AggregateBenchmarkError]: One or more benchmarks failed. Check the results for more information.');
+        this.results = results;
+    }
+}
+exports.AggregateBenchmarkError = AggregateBenchmarkError;
+class AggregateSuiteError extends Error {
+    results;
+    constructor(results) {
+        super('[AggregateSuiteError]: One or more benchmarks failed. Check the results for more information.');
+        this.results = results;
+    }
+}
+exports.AggregateSuiteError = AggregateSuiteError;
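
For orientation, here is a minimal TypeScript sketch of how the error-strategy API introduced above might be used. `Benchmark`, `ErrorStrategy`, and `AggregateBenchmarkError` are exports shown in this diff; the benchmark name and action body are hypothetical.

```ts
import {
  AggregateBenchmarkError,
  Benchmark,
  ErrorStrategy,
} from 'flexi-bench';

async function main() {
  // Collect failures during the run, then surface them all at once.
  const benchmark = new Benchmark('flaky-action', { iterations: 10 })
    .withAction(async () => {
      // hypothetical action under test; it may throw
    })
    .withErrorStrategy(ErrorStrategy.DelayedThrow);

  try {
    const results = await benchmark.run();
    console.log(results);
  } catch (e) {
    if (e instanceof AggregateBenchmarkError) {
      // per-variation results (including failed/failureRate) ride along on the error
      console.error(e.results);
    }
  }
}

main();
```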
package/dist/benchmark-runner.js
CHANGED
@@ -23,7 +23,7 @@ function suite(name, fn) {
     activeSuite = suite;
     const transformed = fn(suite);
     activeSuite = null;
-    return (transformed
+    return (transformed ?? suite).run();
 }
 let activeBenchmark = null;
 /**
@@ -38,10 +38,10 @@ function benchmark(name, fn) {
     const transformed = fn(benchmark);
     activeBenchmark = null;
     if (activeSuite) {
-        activeSuite.addBenchmark(transformed
+        activeSuite.addBenchmark(transformed ?? benchmark);
     }
     else {
-        return (transformed
+        return (transformed ?? benchmark).run();
     }
 }
 let activeVariation = null;
@@ -57,10 +57,10 @@ function variation(name, fn) {
     const transformed = fn(variation);
     activeVariation = null;
     if (activeBenchmark) {
-        activeBenchmark.withVariation(transformed
+        activeBenchmark.withVariation(transformed ?? variation);
     }
     else if (activeSuite) {
-        activeSuite.withVariation(transformed
+        activeSuite.withVariation(transformed ?? variation);
     }
     else {
         throw new Error('`variation` must be called within a benchmark or suite');
package/dist/benchmark.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { Variation } from './variation';
-import { TeardownMethod, SetupMethod, Action, BenchmarkReporter } from './api-types';
+import { TeardownMethod, SetupMethod, Action, BenchmarkReporter, ErrorStrategy } from './api-types';
 import { Result } from './results';
 import { PerformanceObserverOptions } from './performance-observer';
 import { BenchmarkBase } from './shared-api';
@@ -10,6 +10,7 @@ export declare class Benchmark extends BenchmarkBase {
     private timeout?;
     private reporter;
     private watcher?;
+    errorStrategy: ErrorStrategy;
     constructor(name: string, options?: {
         setup?: SetupMethod;
         teardown?: TeardownMethod;
package/dist/benchmark.js
CHANGED
@@ -1,42 +1,40 @@
 "use strict";
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.Benchmark = void 0;
 const benchmark_console_reporter_1 = require("./reporters/benchmark-console-reporter");
 const variation_1 = require("./variation");
+const api_types_1 = require("./api-types");
 const child_process_1 = require("child_process");
 const results_1 = require("./results");
 const performance_observer_1 = require("./performance-observer");
 const shared_api_1 = require("./shared-api");
 class Benchmark extends shared_api_1.BenchmarkBase {
+    name;
+    variations = [];
+    iterations;
+    timeout;
+    reporter;
+    watcher;
+    errorStrategy = api_types_1.ErrorStrategy.Continue;
     constructor(name, options) {
         super();
         this.name = name;
-
-        if (options === null || options === void 0 ? void 0 : options.action) {
+        if (options?.action) {
             this.action = options.action;
         }
-        if (options
+        if (options?.setup) {
            this.setupMethods.push(options.setup);
         }
-        if (options
+        if (options?.teardown) {
            this.teardownMethods.push(options.teardown);
         }
-        if (options
+        if (options?.iterations) {
            this.iterations = options.iterations;
         }
-        if (options
+        if (options?.timeout) {
            this.timeout = options.timeout;
         }
-        this.reporter =
+        this.reporter = options?.reporter || new benchmark_console_reporter_1.BenchmarkConsoleReporter();
     }
     withVariation(nameOrVariation, builder) {
         if (nameOrVariation instanceof variation_1.Variation) {
@@ -83,117 +81,119 @@ class Benchmark extends shared_api_1.BenchmarkBase {
         this.watcher = new performance_observer_1.PerformanceWatcher(options);
         return this;
     }
-    run() {
-
-
-
-
-
-
+    async run() {
+        this.validate();
+        let results = [];
+        if (this.variations.length === 0) {
+            this.variations.push(new variation_1.Variation('default'));
+        }
+        const totalIterations = this.iterations
+            ? this.variations.length * this.iterations
+            : undefined;
+        const startTime = performance.now();
+        let totalCompletedIterations = 0;
+        for (let variationIndex = 0; variationIndex < this.variations.length; variationIndex++) {
+            const variation = this.variations[variationIndex];
+            const iterationResults = [];
+            // SETUP
+            const oldEnv = { ...process.env };
+            process.env = {
+                ...process.env,
+                ...variation.environment,
+            };
+            for (const setup of this.setupMethods) {
+                await setup(variation);
            }
-        const
-
-
-
-
-
-
-
-
-
-
-
-        }
-        for (const setup of variation.setupMethods) {
-            yield setup(variation);
-        }
-        // ACT
-        const benchmarkThis = this;
-        yield new Promise((resolve, reject) => __awaiter(this, void 0, void 0, function* () {
-            let completedIterations = 0;
-            let timeout = benchmarkThis.timeout
-                ? setTimeout(() => {
-                    running = false;
-                    if ((benchmarkThis === null || benchmarkThis === void 0 ? void 0 : benchmarkThis.iterations) &&
-                        completedIterations < benchmarkThis.iterations) {
-                        reject('Timeout');
-                    }
-                    resolve();
-                }, benchmarkThis.timeout)
-                : null;
-            let running = true;
-            while (running) {
-                for (const setup of this.setupEachMethods.concat(variation.setupEachMethods)) {
-                    yield setup(variation);
-                }
-                const a = performance.now();
-                if (variation.action) {
-                    yield runAction(variation.action, variation);
-                }
-                else if (this.action) {
-                    yield runAction(this.action, variation);
-                }
-                const b = performance.now();
-                for (const teardown of this.teardownEachMethods.concat(variation.teardownEachMethods)) {
-                    yield teardown(variation);
-                }
-                const duration = b - a;
-                completedIterations++;
-                totalCompletedIterations++;
-                timings.push(duration);
-                if (this.reporter.progress &&
-                    totalIterations &&
-                    benchmarkThis.iterations) {
-                    this.reporter.progress(variation.name, totalCompletedIterations / totalIterations, {
-                        timeElapsed: performance.now() - startTime,
-                        totalIterations,
-                        completedIterations: totalCompletedIterations,
-                        timeout: benchmarkThis.timeout,
-                    });
+            for (const setup of variation.setupMethods) {
+                await setup(variation);
+            }
+            // ACT
+            const benchmarkThis = this;
+            await new Promise(async (resolve, reject) => {
+                let completedIterations = 0;
+                let timeout = benchmarkThis.timeout
+                    ? setTimeout(() => {
+                        running = false;
+                        if (benchmarkThis?.iterations &&
+                            completedIterations < benchmarkThis.iterations) {
+                            reject('Timeout');
                        }
-
-
-
-
-
-
-
+                        resolve();
+                    }, benchmarkThis.timeout)
+                    : null;
+                let running = true;
+                while (running) {
+                    for (const setup of this.setupEachMethods.concat(variation.setupEachMethods)) {
+                        await setup(variation);
+                    }
+                    const result = await runAndMeasureAction(benchmarkThis, variation);
+                    const errorStrategy = variation.errorStrategy ?? benchmarkThis.errorStrategy;
+                    if (errorStrategy === api_types_1.ErrorStrategy.Abort &&
+                        result instanceof Error) {
+                        reject(result);
+                        return;
+                    }
+                    for (const teardown of this.teardownEachMethods.concat(variation.teardownEachMethods)) {
+                        await teardown(variation);
+                    }
+                    completedIterations++;
+                    totalCompletedIterations++;
+                    iterationResults.push(result);
+                    if (this.reporter.progress &&
+                        totalIterations &&
+                        benchmarkThis.iterations) {
+                        this.reporter.progress(variation.name, totalCompletedIterations / totalIterations, {
+                            timeElapsed: performance.now() - startTime,
+                            totalIterations,
+                            completedIterations: totalCompletedIterations,
+                            timeout: benchmarkThis.timeout,
+                        });
+                    }
+                    if (benchmarkThis?.iterations &&
+                        completedIterations >= benchmarkThis.iterations) {
+                        running = false;
+                        if (timeout) {
+                            clearTimeout(timeout);
                        }
+                        resolve();
                    }
-            }));
-            if (this.reporter.progress && !totalIterations) {
-                this.reporter.progress(variation.name, variationIndex / this.variations.length, {
-                    timeElapsed: performance.now() - startTime,
-                    timeout: this.timeout,
-                    completedIterations: totalCompletedIterations,
-                });
-            }
-            // TEARDOWN
-            for (const teardown of variation.teardownMethods) {
-                yield teardown(variation);
-            }
-            for (const teardown of this.teardownMethods) {
-                yield teardown(variation);
                }
-
-
-
-
-
-
-
-
-
-
-
+            });
+            if (this.reporter.progress && !totalIterations) {
+                this.reporter.progress(variation.name, variationIndex / this.variations.length, {
+                    timeElapsed: performance.now() - startTime,
+                    timeout: this.timeout,
+                    completedIterations: totalCompletedIterations,
+                });
+            }
+            // TEARDOWN
+            for (const teardown of variation.teardownMethods) {
+                await teardown(variation);
+            }
+            for (const teardown of this.teardownMethods) {
+                await teardown(variation);
+            }
+            process.env = oldEnv;
+            // REPORT
+            const result = (0, results_1.calculateResultsFromDurations)(variation.name, iterationResults);
+            // PerformanceObserver needs a chance to flush
+            if (this.watcher) {
+                const measures = await this.watcher.getMeasures();
+                for (const key in measures) {
+                    result.subresults ??= [];
+                    result.subresults.push((0, results_1.calculateResultsFromDurations)(key, measures[key]));
                }
-
+                this.watcher.clearMeasures();
            }
-
-
-
-
+            results.push(result);
+        }
+        this.watcher?.disconnect();
+        this.reporter.report(this, results);
+        if (this.errorStrategy === api_types_1.ErrorStrategy.DelayedThrow &&
+            results.some((r) => r.failed)) {
+            throw new api_types_1.AggregateBenchmarkError(results);
+        }
+        return results;
     }
     validate() {
         if (!this.timeout && !this.iterations) {
@@ -224,26 +224,35 @@ class Benchmark extends shared_api_1.BenchmarkBase {
     }
 }
 exports.Benchmark = Benchmark;
-function
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+async function runAndMeasureAction(benchmark, variation) {
+    try {
+        const a = performance.now();
+        await runAction((variation.action ?? benchmark.action), variation);
+        const b = performance.now();
+        return b - a;
+    }
+    catch (e) {
+        return e instanceof Error ? e : new Error('Unknown error during action.');
+    }
+}
+async function runAction(action, variation) {
+    if (typeof action === 'string') {
+        return new Promise((resolve, reject) => {
+            const child = (0, child_process_1.spawn)(action, variation.cliArgs, {
+                shell: true,
+                windowsHide: true,
             });
-
-
-
-
-
+            child.on('exit', (code) => {
+                if (code === 0) {
+                    resolve();
+                }
+                else {
+                    reject(`Action failed with code ${code}`);
+                }
+            });
+        });
+    }
+    else {
+        return action(variation);
+    }
 }
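
As a hedged illustration of the per-iteration error handling added in `Benchmark.run` above (the effective strategy is resolved as `variation.errorStrategy ?? benchmark.errorStrategy`), a variation-level override might look like the sketch below; the variation name, environment variable, and workload are made up for the example.

```ts
import { Benchmark, ErrorStrategy, Variation } from 'flexi-bench';

// One variation aborts on the first failing iteration, while the benchmark as a
// whole keeps the default Continue strategy.
const strict = new Variation('strict')
  .withEnvironmentVariable('STRICT_MODE', 'true') // hypothetical env toggle
  .withErrorStrategy(ErrorStrategy.Abort);

const benchmark = new Benchmark('parse-fixture', { iterations: 25 })
  .withAction(() => {
    // hypothetical work being measured
    JSON.parse('{"ok": true}');
  })
  .withVariation(strict);

benchmark.run().catch((err) => {
  // With ErrorStrategy.Abort, run() rejects as soon as an iteration errors.
  console.error('benchmark aborted:', err);
});
```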
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,7 @@
 export * from './reporters/benchmark-console-reporter';
 export * from './reporters/markdown-benchmark-reporter';
 export * from './reporters/suite-console-reporter';
+export * from './reporters/noop-reporter';
 export * from './api-types';
 export * from './benchmark';
 export * from './variation';
package/dist/index.js
CHANGED
@@ -17,6 +17,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./reporters/benchmark-console-reporter"), exports);
 __exportStar(require("./reporters/markdown-benchmark-reporter"), exports);
 __exportStar(require("./reporters/suite-console-reporter"), exports);
+__exportStar(require("./reporters/noop-reporter"), exports);
 __exportStar(require("./api-types"), exports);
 __exportStar(require("./benchmark"), exports);
 __exportStar(require("./variation"), exports);
package/dist/performance-observer.js
CHANGED
@@ -1,27 +1,17 @@
 "use strict";
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PerformanceWatcher = void 0;
 const node_perf_hooks_1 = require("node:perf_hooks");
 class PerformanceWatcher {
+    observer;
+    measures = {};
     constructor(opts = {}) {
-        this.measures = {};
         this.observer = new node_perf_hooks_1.PerformanceObserver((list) => {
-            var _a;
-            var _b;
             for (const entry of list.getEntries()) {
                 if (entry.entryType === 'measure' &&
                     (!opts.measureFilter || opts.measureFilter(entry))) {
-                    const label = normalizeLabel(entry.name, opts
-
+                    const label = normalizeLabel(entry.name, opts?.label);
+                    this.measures[label] ??= [];
                     this.measures[label].push(entry.duration);
                 }
             }
@@ -33,10 +23,8 @@ class PerformanceWatcher {
     // not fire until the next tick so we need that to happen before the measures are
     // retrieved. Since this method is async, it must be awaited, which actually gives
     // the observer time to collect the measures.
-    getMeasures() {
-        return
-        return this.measures;
-        });
+    async getMeasures() {
+        return this.measures;
     }
     clearMeasures() {
         for (const key in this.measures) {
@@ -44,8 +32,7 @@ class PerformanceWatcher {
         }
     }
     disconnect() {
-
-        (_a = this.observer) === null || _a === void 0 ? void 0 : _a.disconnect();
+        this.observer?.disconnect();
         delete this.observer;
     }
 }
package/dist/reporters/benchmark-console-reporter.js
CHANGED
@@ -3,26 +3,27 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.BenchmarkConsoleReporter = void 0;
 const cli_progress_1 = require("cli-progress");
 class BenchmarkConsoleReporter {
-
-
-
-
-
-
-
-
-
-    }
+    bar = new cli_progress_1.SingleBar({
+        format: 'Running variation {label}: {bar} {percentage}% | {value}/{total} - ETA: {eta}s',
+        barCompleteChar: '\u2588',
+        barIncompleteChar: '\u2591',
+        hideCursor: true,
+        stopOnComplete: true,
+        clearOnComplete: true,
+    });
+    constructor() { }
     progress(name, percent, context) {
-        var _a;
         if (!this.bar.isActive) {
-            this.bar.start(
+            this.bar.start(context.totalIterations ?? 100, 0);
         }
         this.bar.update(context.completedIterations, { label: name });
     }
     report(benchmark, results) {
+        const tableEntries = results.map(({ raw, ...rest }) => ({
+            ...rest,
+        }));
         console.log(`Benchmark: ${benchmark.name}`);
-        console.table(
+        console.table(tableEntries);
     }
 }
 exports.BenchmarkConsoleReporter = BenchmarkConsoleReporter;
package/dist/reporters/markdown-benchmark-reporter.js
CHANGED
@@ -4,26 +4,26 @@ exports.MarkdownBenchmarkReporter = void 0;
 const fs_1 = require("fs");
 const markdown_factory_1 = require("markdown-factory");
 class MarkdownBenchmarkReporter {
+    outputFile;
+    fields;
     constructor(opts) {
-        var _a;
-        this.report = (benchmark, results) => {
-            (0, fs_1.writeFileSync)(this.outputFile, (0, markdown_factory_1.h1)(benchmark.name, results.some((r) => !!r.subresults)
-                ? (0, markdown_factory_1.lines)(results.map((r) => {
-                    var _a;
-                    const entries = [Object.assign(Object.assign({}, r), { label: 'total' })];
-                    delete entries[0].subresults;
-                    for (const subresult of (_a = r.subresults) !== null && _a !== void 0 ? _a : []) {
-                        entries.push(subresult);
-                    }
-                    return (0, markdown_factory_1.h2)(r.label, (0, markdown_factory_1.table)(entries, [
-                        { field: 'label', label: '' },
-                        ...this.fields,
-                    ]));
-                }))
-                : (0, markdown_factory_1.table)(results, [{ field: 'label', label: '' }, ...this.fields])));
-        };
         this.outputFile = opts.outputFile;
-        this.fields =
+        this.fields = opts.fields ?? ['min', 'average', 'p95', 'max'];
     }
+    report = (benchmark, results) => {
+        (0, fs_1.writeFileSync)(this.outputFile, (0, markdown_factory_1.h1)(benchmark.name, results.some((r) => !!r.subresults)
+            ? (0, markdown_factory_1.lines)(results.map((r) => {
+                const entries = [{ ...r, label: 'total' }];
+                delete entries[0].subresults;
+                for (const subresult of r.subresults ?? []) {
+                    entries.push(subresult);
+                }
+                return (0, markdown_factory_1.h2)(r.label, (0, markdown_factory_1.table)(entries, [
+                    { field: 'label', label: '' },
+                    ...this.fields,
+                ]));
+            }))
+            : (0, markdown_factory_1.table)(results, [{ field: 'label', label: '' }, ...this.fields])));
+    };
 }
 exports.MarkdownBenchmarkReporter = MarkdownBenchmarkReporter;
package/dist/reporters/suite-console-reporter.js
CHANGED
@@ -2,14 +2,15 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.SuiteConsoleReporter = void 0;
 class SuiteConsoleReporter {
-
-
-
-
-
-
-
-    }
-
+    report = (results) => {
+        console.log('Suite Results:');
+        for (const [name, result] of Object.entries(results)) {
+            const tableEntries = result.map(({ raw, ...rest }) => ({
+                ...rest,
+            }));
+            console.log(`Benchmark: ${name}`);
+            console.table(tableEntries);
+        }
+    };
 }
 exports.SuiteConsoleReporter = SuiteConsoleReporter;
package/dist/results.d.ts
CHANGED
@@ -25,10 +25,18 @@ export type Result = {
     /**
      * The raw durations, in order. Used for custom reporters.
      */
-    raw: number[];
+    raw: (number | Error)[];
+    /**
+     * Whether any run of the benchmark failed.
+     */
+    failed?: boolean;
+    /**
+     * The rate of failure, if any.
+     */
+    failureRate?: number;
     /**
      * Subresults, if any. Typically sourced from performance observer.
      */
     subresults?: Result[];
 };
-export declare function calculateResultsFromDurations(label: string, durations: number[]): Result;
+export declare function calculateResultsFromDurations(label: string, durations: (number | Error)[]): Result;
package/dist/results.js
CHANGED
@@ -2,13 +2,25 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.calculateResultsFromDurations = calculateResultsFromDurations;
 function calculateResultsFromDurations(label, durations) {
-
+    const errors = [];
+    const results = [];
+    for (const duration of durations) {
+        if (duration instanceof Error) {
+            errors.push(duration);
+        }
+        else {
+            results.push(duration);
+        }
+    }
+    results.sort((a, b) => a - b);
     return {
         label,
-        min: Math.min(...
-        max: Math.max(...
-        average:
-        p95:
+        min: Math.min(...results),
+        max: Math.max(...results),
+        average: results.reduce((acc, duration) => acc + duration, 0) / results.length,
+        p95: results[Math.floor(results.length * 0.95)],
         raw: durations,
+        failed: errors.length > 0,
+        failureRate: errors.length / durations.length,
     };
 }
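
To make the new result shape concrete: `calculateResultsFromDurations` now accepts `Error` entries alongside numeric durations, excludes them from the timing statistics, and reports them via `failed`/`failureRate`. A small sketch follows; it assumes the helper is reachable from your import path, since only `dist/results.js` itself is shown in this diff.

```ts
import { calculateResultsFromDurations } from 'flexi-bench';

const result = calculateResultsFromDurations('example', [
  12.5,
  14.1,
  new Error('boom'), // a failed iteration
  13.2,
]);

console.log(result.failed);          // true
console.log(result.failureRate);     // 0.25 -> 1 error out of 4 entries
console.log(result.min, result.max); // 12.5 14.1 -> stats use the 3 numeric entries only
```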
package/dist/shared-api.d.ts
CHANGED
@@ -1,13 +1,15 @@
-import { Action, SetupMethod, TeardownMethod } from './api-types';
+import { Action, ErrorStrategy, SetupMethod, TeardownMethod } from './api-types';
 export declare class BenchmarkBase {
     setupEachMethods: SetupMethod[];
     setupMethods: SetupMethod[];
     teardownMethods: TeardownMethod[];
     teardownEachMethods: TeardownMethod[];
     action?: Action;
+    errorStrategy?: ErrorStrategy;
     withSetup(setup: SetupMethod): this;
     withSetupEach(setup: SetupMethod): this;
     withTeardown(teardown: TeardownMethod): this;
     withTeardownEach(teardown: TeardownMethod): this;
     withAction(action: Action): this;
+    withErrorStrategy(errorStrategy: ErrorStrategy): this;
 }
package/dist/shared-api.js
CHANGED
@@ -2,12 +2,12 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.BenchmarkBase = void 0;
 class BenchmarkBase {
-
-
-
-
-
-
+    setupEachMethods = [];
+    setupMethods = [];
+    teardownMethods = [];
+    teardownEachMethods = [];
+    action;
+    errorStrategy;
     withSetup(setup) {
         this.setupMethods.push(setup);
         return this;
@@ -28,5 +28,9 @@ class BenchmarkBase {
         this.action = action;
         return this;
     }
+    withErrorStrategy(errorStrategy) {
+        this.errorStrategy = errorStrategy;
+        return this;
+    }
 }
 exports.BenchmarkBase = BenchmarkBase;
package/dist/suite.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { BenchmarkReporter, SuiteReporter } from './api-types';
+import { BenchmarkReporter, ErrorStrategy, SuiteReporter } from './api-types';
 import { Benchmark } from './benchmark';
 import { Result } from './results';
 import { Variation } from './variation';
@@ -11,11 +11,21 @@ export declare class Suite {
     private variations;
     private reporter;
     private benchmarkReporter?;
+    private errorStrategy;
+    private shouldSetErrorStrategyOnBenchmarks;
     constructor(name: string, options?: SuiteOptions);
     addBenchmark(benchmark: Benchmark): this;
     withVariation(variation: Variation): this;
     withVariations(variations: Variation[]): this;
     withReporter(reporter: SuiteReporter): this;
     withBenchmarkReporter(reporter: BenchmarkReporter): this;
+    /**
+     * Sets the error strategy for the suite and optionally for the benchmarks.
+     * @param errorStrategy Determines what to do when an error occurs during a benchmark run. See {@link ErrorStrategy}
+     * @param opts Options for the error strategy. If `shouldSetOnBenchmarks` is true, the error strategy will be set on all benchmarks in the suite.
+     */
+    withErrorStrategy(errorStrategy: ErrorStrategy, opts?: {
+        shouldSetOnBenchmarks?: boolean;
+    }): this;
     run(): Promise<Record<string, Result[]>>;
 }
package/dist/suite.js
CHANGED
@@ -1,24 +1,21 @@
 "use strict";
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.Suite = void 0;
+const api_types_1 = require("./api-types");
 const suite_console_reporter_1 = require("./reporters/suite-console-reporter");
 class Suite {
+    name;
+    benchmarks = [];
+    variations = [];
+    reporter;
+    benchmarkReporter;
+    errorStrategy = api_types_1.ErrorStrategy.Continue;
+    shouldSetErrorStrategyOnBenchmarks = false;
     constructor(name, options) {
         this.name = name;
-        this.benchmarks = [];
-        this.variations = [];
         this.name = name;
         this.benchmarks = [];
-        this.reporter =
+        this.reporter = options?.reporter || new suite_console_reporter_1.SuiteConsoleReporter();
     }
     addBenchmark(benchmark) {
         this.benchmarks.push(benchmark);
@@ -40,19 +37,38 @@ class Suite {
         this.benchmarkReporter = reporter;
         return this;
     }
-
-
-
-
-
-
-
-
-
+    /**
+     * Sets the error strategy for the suite and optionally for the benchmarks.
+     * @param errorStrategy Determines what to do when an error occurs during a benchmark run. See {@link ErrorStrategy}
+     * @param opts Options for the error strategy. If `shouldSetOnBenchmarks` is true, the error strategy will be set on all benchmarks in the suite.
+     */
+    withErrorStrategy(errorStrategy, opts) {
+        this.errorStrategy = errorStrategy;
+        if (opts?.shouldSetOnBenchmarks) {
+            this.shouldSetErrorStrategyOnBenchmarks = opts?.shouldSetOnBenchmarks;
+        }
+        return this;
+    }
+    async run() {
+        const results = {};
+        for (const benchmark of this.benchmarks) {
+            benchmark.withVariations(this.variations);
+            if (this.benchmarkReporter) {
+                benchmark.withReporter(this.benchmarkReporter);
+            }
+            if (this.shouldSetErrorStrategyOnBenchmarks) {
+                benchmark.withErrorStrategy(this.errorStrategy);
+            }
+            results[benchmark.name] = await benchmark.run();
+        }
+        this.reporter.report(results);
+        if (this.errorStrategy === api_types_1.ErrorStrategy.DelayedThrow) {
+            const failedResults = Object.values(results).flatMap((r) => r.filter((r) => r.failed));
+            if (failedResults.length > 0) {
+                throw new api_types_1.AggregateSuiteError(results);
            }
-
-
-        });
+        }
+        return results;
     }
 }
 exports.Suite = Suite;
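
A hedged sketch of the suite-level API added above: `withErrorStrategy` stores the strategy on the suite and, with `shouldSetOnBenchmarks: true`, pushes it onto each benchmark before running; with `DelayedThrow`, an `AggregateSuiteError` is thrown after reporting if any result failed. The suite/benchmark names and workload are illustrative, and the example assumes `Suite` and the error classes are re-exported from the package entry point (the full index exports are not shown in this diff).

```ts
import {
  AggregateSuiteError,
  Benchmark,
  ErrorStrategy,
  Suite,
} from 'flexi-bench';

const suite = new Suite('parsers')
  .addBenchmark(
    new Benchmark('JSON.parse', { iterations: 100 }).withAction(() => {
      JSON.parse('{"value": 42}'); // stand-in workload
    }),
  )
  .withErrorStrategy(ErrorStrategy.DelayedThrow, { shouldSetOnBenchmarks: true });

suite
  .run()
  .then((results) => console.log(Object.keys(results)))
  .catch((e) => {
    if (e instanceof AggregateSuiteError) {
      // results are keyed by benchmark name; each Result carries failed/failureRate
      console.error(e.results);
    }
  });
```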
package/dist/variation.js
CHANGED
@@ -4,11 +4,12 @@ exports.Variation = void 0;
 const shared_api_1 = require("./shared-api");
 const utils_1 = require("./utils");
 class Variation extends shared_api_1.BenchmarkBase {
+    name;
+    environment = {};
+    cliArgs = [];
     constructor(name) {
         super();
         this.name = name;
-        this.environment = {};
-        this.cliArgs = [];
     }
     static FromEnvironmentVariables(variables) {
         const combinations = (0, utils_1.findCombinations)(variables.map(([name, values]) => values.map((value) => [name, value])));
@@ -50,7 +51,10 @@ class Variation extends shared_api_1.BenchmarkBase {
         });
     }
     withEnvironmentVariables(env) {
-        this.environment =
+        this.environment = {
+            ...this.environment,
+            ...env,
+        };
         return this;
     }
     withEnvironmentVariable(name, value) {
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "flexi-bench",
-  "version": "0.0.0-alpha.4",
+  "version": "0.1.0",
   "description": "",
   "main": "dist/index.js",
   "repository": {
@@ -11,11 +11,13 @@
     "test": "node --import tsx --test './src/**/*.spec.ts'",
     "test:watch": "node --import tsx --test --watch './src/**/*.spec.ts'",
     "build": "tsc -p tsconfig.json",
-    "format": "prettier --write src"
+    "format": "prettier --write src",
+    "prepare": "patch-package && nx build flexi-bench"
   },
   "author": "",
   "license": "MIT",
   "devDependencies": {
+    "@nx-dotnet/nx-ghpages": "^2.3.0",
     "@nx/js": "^19.4.2",
     "@swc-node/register": "~1.9.1",
     "@swc/core": "~1.5.7",
@@ -23,6 +25,7 @@
     "@types/cli-progress": "^3.11.6",
     "@types/node": "^20.14.10",
     "nx": "19.4.2",
+    "patch-package": "^8.0.0",
     "prettier": "^3.3.2",
     "ts-node": "^10.9.2",
     "tsx": "^4.16.2",