flexi-bench 0.0.0-alpha.4 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,46 @@
+ ## 0.2.0 (2026-02-02)
+
+
+ ### 🚀 Features
+
+ - add result metadata support (iterations, totalDuration, benchmarkName, variationName) ([22dfc33](https://github.com/agentender/flexi-bench/commit/22dfc33))
+ - improve console reporters with metadata display ([bfb4482](https://github.com/agentender/flexi-bench/commit/bfb4482))
+ - add composite and JSON suite reporters with examples ([c29f381](https://github.com/agentender/flexi-bench/commit/c29f381))
+ - include git info on results when in CI ([3b2a63d](https://github.com/agentender/flexi-bench/commit/3b2a63d))
+ - **docs-site:** upload algolia settings ([aea3eff](https://github.com/agentender/flexi-bench/commit/aea3eff))
+
+ ### 🩹 Fixes
+
+ - **e2e:** ensure output directory exists and improve error handling ([c0e79ea](https://github.com/agentender/flexi-bench/commit/c0e79ea))
+
+ ### ❤️ Thank You
+
+ - Craigory Coppola @AgentEnder
+
+ ## 0.1.0 (2024-07-28)
+
+
+ ### 🚀 Features
+
+ - add different error strategies for handling failing actions ([#11](https://github.com/AgentEnder/flexi-bench/pull/11))
+ - **repo:** add ability to publish only docs ([4cde432](https://github.com/AgentEnder/flexi-bench/commit/4cde432))
+
+ ### ❤️ Thank You
+
+ - Craigory Coppola @AgentEnder
+
+ # 0.0.0 (2024-07-28)
+
+
+ ### 🚀 Features
+
+ - add different error strategies for handling failing actions ([#11](https://github.com/AgentEnder/flexi-bench/pull/11))
+ - **repo:** add ability to publish only docs ([4cde432](https://github.com/AgentEnder/flexi-bench/commit/4cde432))
+
+ ### ❤️ Thank You
+
+ - Craigory Coppola @AgentEnder
+
  ## 0.0.0-alpha.4 (2024-07-17)


package/README.md CHANGED
@@ -332,6 +332,50 @@ const benchmark = new Benchmark('My Benchmark', {
  );
  ```

+ #### Variation Context
+
+ For more complex scenarios where you need to pass objects or data directly to the benchmark action, use the context API instead of environment variables:
+
+ ```javascript
+ const { Benchmark, Variation } = require('flexi-bench');
+
+ // Define different implementations
+ const loopProcessor = {
+   process: (data) => {
+     /* loop */
+   },
+ };
+ const reduceProcessor = {
+   process: (data) => {
+     /* reduce */
+   },
+ };
+
+ // Use FromContexts for clean, declarative variation setup
+ const benchmark = new Benchmark('Process Data')
+   .withIterations(100)
+   .withVariations(
+     Variation.FromContexts('processor', [
+       ['loop', loopProcessor],
+       ['reduce', reduceProcessor],
+     ]),
+   )
+   .withAction((variation) => {
+     // Use get() to retrieve context data
+     const processor = variation.get('processor');
+     const data = [/* test data */];
+     processor.process(data);
+   });
+ ```
+
+ The context API provides:
+
+ - `Variation.FromContexts(key, [[name, value], ...])` - Create variations with context (cleanest API)
+ - `withContext(key, value)` - Attach custom data to a single variation
+ - `get(key)` - Retrieve context data (returns `T | undefined`)
+ - `getOrDefault(key, defaultValue)` - Retrieve with fallback value
+
+ Use `FromContexts` when creating multiple variations with the same context key - it's cleaner and more concise than individual `withVariation` calls with `withContext`.
+
  Variations can also be added to suites. Variations added to a suite will be applied to all benchmarks in the suite.

  For example, the below suite would run each benchmark with 'NO_DAEMON' set to true, and then with 'OTHER_VAR' set to 'value1' for a total of 4 benchmark runs in the suite:
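
The context section above demonstrates `Variation.FromContexts` with `get`. As a complement, a minimal sketch of the single-variation path the bullet list mentions (`withVariation` plus `withContext`, read back with `getOrDefault`). Constructing a `Variation` by name and chaining off `withContext` are assumptions; neither signature is shown in this diff:

```javascript
const { Benchmark, Variation } = require('flexi-bench');

// Assumption: Variation is constructable from a name and
// withContext returns the variation for chaining.
const loopVariation = new Variation('loop').withContext('processor', {
  process: (data) => {
    /* loop */
  },
});

const benchmark = new Benchmark('Process Data')
  .withIterations(100)
  .withVariation(loopVariation)
  .withAction((variation) => {
    // getOrDefault falls back to a no-op processor when no context was attached
    const processor = variation.getOrDefault('processor', { process: () => {} });
    processor.process([/* test data */]);
  });
```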
@@ -364,10 +408,239 @@ const suite = new Suite('My Suite')
  );
  ```

+ ## Reporters
+
+ Reporters control how benchmark results are output. FlexiBench provides several built-in reporters:
+
+ ### Console Reporters
+
+ ```javascript
+ const {
+   Benchmark,
+   BenchmarkConsoleReporter,
+   SuiteConsoleReporter,
+ } = require('flexi-bench');
+
+ // For single benchmarks
+ const benchmark = new Benchmark('My Benchmark', {
+   iterations: 10,
+   action: () => {
+     /* ... */
+   },
+   reporter: new BenchmarkConsoleReporter(),
+ });
+
+ // For suites
+ const suite = new Suite('My Suite')
+   .withReporter(new SuiteConsoleReporter())
+   .addBenchmark(benchmark);
+ ```
+
+ Both console reporters support the `NO_COLOR` environment variable for disabling colors:
+
+ ```bash
+ NO_COLOR=1 node my-benchmark.js
+ ```
+
+ Or explicitly via options:
+
+ ```javascript
+ new SuiteConsoleReporter({ noColor: true });
+ new BenchmarkConsoleReporter({ noColor: true });
+ ```
+
+ ### Markdown Reporters
+
+ ```javascript
+ const {
+   MarkdownBenchmarkReporter,
+   MarkdownSuiteReporter,
+ } = require('flexi-bench');
+
+ // For single benchmark output
+ const benchmarkReporter = new MarkdownBenchmarkReporter({
+   outputFile: 'results.md',
+   fields: ['min', 'average', 'p95', 'max'],
+   append: true, // Set to true to avoid overwriting when running multiple benchmarks
+ });
+
+ // For suite-level output (recommended)
+ const suiteReporter = new MarkdownSuiteReporter({
+   outputFile: 'results.md',
+   title: 'Benchmark Results',
+   fields: ['min', 'average', 'p95', 'max', 'iterations'],
+ });
+ ```
+
+ #### Automatic Comparison Tables
+
+ When a benchmark has multiple variations, both `MarkdownBenchmarkReporter` and `MarkdownSuiteReporter` automatically generate a comparison table showing:
+
+ - Average time for each variation
+ - Percentage difference vs the fastest variation
+ - Multiplier (e.g., "2.5x slower")
+ - Trophy emoji (🏆) marking the fastest variation
+
+ This makes it easy to see which implementation performs best at a glance.
+
+ ### JSON Reporter
+
+ For CI/CD integration:
+
+ ```javascript
+ const { JsonSuiteReporter } = require('flexi-bench');
+
+ const reporter = new JsonSuiteReporter({
+   outputFile: 'results.json',
+   pretty: true,
+   includeMetadata: true, // Includes timestamp, platform, node version
+ });
+ ```
+
+ ### Composite Reporter
+
+ Use multiple reporters simultaneously:
+
+ ```javascript
+ const {
+   CompositeReporter,
+   SuiteConsoleReporter,
+   MarkdownSuiteReporter,
+   JsonSuiteReporter,
+ } = require('flexi-bench');
+
+ const suite = new Suite('My Suite').withReporter(
+   new CompositeReporter([
+     new SuiteConsoleReporter(),
+     new MarkdownSuiteReporter({ outputFile: 'results.md' }),
+     new JsonSuiteReporter({ outputFile: 'results.json' }),
+   ]),
+ );
+ ```
+
+ ### Custom Reporters
+
+ Create custom reporters by implementing the `SuiteReporter` interface:
+
+ ```typescript
+ import { SuiteReporter, Result } from 'flexi-bench';
+
+ class MyCustomReporter implements SuiteReporter {
+   // Optional lifecycle hooks
+   onSuiteStart?(suiteName: string): void {
+     console.log(`Starting suite: ${suiteName}`);
+   }
+
+   onBenchmarkStart?(benchmarkName: string): void {
+     console.log(`Running: ${benchmarkName}`);
+   }
+
+   onBenchmarkEnd?(benchmarkName: string, results: Result[]): void {
+     console.log(`Completed: ${benchmarkName}`);
+   }
+
+   // Required: called after all benchmarks complete
+   report(results: Record<string, Result[]>): void {
+     // Process results here
+     for (const [name, result] of Object.entries(results)) {
+       console.log(`${name}: ${result[0].average}ms average`);
+     }
+   }
+ }
+ ```
+
+ ## Result Type
+
+ The `Result` type contains comprehensive information about benchmark runs:
+
+ ```typescript
+ interface Result {
+   // Basic metrics
+   label: string; // Name of benchmark or variation
+   min: number; // Minimum duration (ms)
+   max: number; // Maximum duration (ms)
+   average: number; // Average duration (ms)
+   p95: number; // 95th percentile duration (ms)
+   raw: (number | Error)[]; // Raw durations/errors
+
+   // Failure information
+   failed?: boolean; // Whether any iteration failed
+   failureRate?: number; // Rate of failures (0-1)
+
+   // Metadata (useful for custom reporters)
+   iterations?: number; // Number of iterations run
+   totalDuration?: number; // Total wall-clock time (ms)
+   benchmarkName?: string; // Name of parent benchmark
+   variationName?: string; // Name of variation
+
+   // Subresults from performance observer
+   subresults?: Result[];
+ }
+ ```
+
+ The `Result` type is exported from the main package:
+
+ ```typescript
+ import { Result } from 'flexi-bench';
+ ```
+
+ ## Cookbook
+
+ ### Benchmarking Multiple Implementations
+
+ Compare different implementations of the same interface:
+
+ ```javascript
+ const {
+   Suite,
+   Benchmark,
+   Variation,
+   MarkdownSuiteReporter,
+ } = require('flexi-bench');
+
+ // Define your implementations
+ const implementations = {
+   loop: (data) => {
+     /* loop implementation */
+   },
+   reduce: (data) => {
+     /* reduce implementation */
+   },
+ };
+
+ const suite = new Suite('Implementation Comparison')
+   .withReporter(new MarkdownSuiteReporter({ outputFile: 'results.md' }))
+   .addBenchmark(
+     new Benchmark('Process Data')
+       .withIterations(100)
+       // Create variations with context - no environment variables needed!
+       .withVariations(
+         Variation.FromContexts('impl', [
+           ['loop', implementations.loop],
+           ['reduce', implementations.reduce],
+         ]),
+       )
+       .withAction((variation) => {
+         // Retrieve the implementation directly from context
+         const impl = variation.get('impl');
+         const data = [
+           /* test data */
+         ];
+         impl(data);
+       }),
+   );
+ ```
+
  ## Examples

  See examples folder.

- - ./examples/benchmark.ts is the motivation for this project. It benchmarks the performance of Nx commands with and without a daemon.
- - ./examples/performance-observer.ts is a simple example of how to use the PerformanceObserver API to measure the performance of a function.
- - ./examples/simple-command.ts demonstrates how to benchmark a simple command.
+ - `./examples/benchmark.ts` - Full benchmark suite with environment variable variations
+ - `./examples/performance-observer.ts` - Using PerformanceObserver API
+ - `./examples/simple-command.ts` - Benchmarking CLI commands
+ - `./examples/multiple-reporters.ts` - Using CompositeReporter for multiple outputs
+ - `./examples/custom-reporter.ts` - Creating custom reporters with Result type
+ - `./examples/cookbook-different-implementations.ts` - Comparing implementations
+ - `./examples/no-color-support.ts` - Disabling colors in CI environments
+ - `./examples/markdown-reporter-append.ts` - Using append mode for MarkdownReporter
+ - `./examples/markdown-comparison.ts` - Demonstrates automatic comparison tables with variations
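
The README sections above introduce both the `Result` metadata fields and the `SuiteReporter` interface. As a bridge between the two, a small sketch of a custom reporter that derives throughput from the new optional `iterations` and `totalDuration` fields (both may be `undefined`, so the sketch guards for that; the reporter name is hypothetical):

```javascript
class ThroughputReporter {
  // Required SuiteReporter member: called once after all benchmarks finish
  report(results) {
    for (const [benchmarkName, benchmarkResults] of Object.entries(results)) {
      for (const result of benchmarkResults) {
        // iterations and totalDuration are optional metadata added in 0.2.0
        if (result.iterations && result.totalDuration) {
          const opsPerSecond = (result.iterations / result.totalDuration) * 1000;
          console.log(
            `${benchmarkName} / ${result.label}: ${opsPerSecond.toFixed(1)} ops/sec`,
          );
        }
      }
    }
  }
}

// Attach it like any other suite reporter:
// new Suite('My Suite').withReporter(new ThroughputReporter());
```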
@@ -19,5 +19,50 @@ export interface BenchmarkReporter {
      report: (benchmark: Benchmark, results: Result[]) => void;
  }
  export interface SuiteReporter {
+     /**
+      * Called before the suite starts running.
+      * @param suiteName - The name of the suite
+      */
+     onSuiteStart?: (suiteName: string) => void;
+     /**
+      * Called before each benchmark starts.
+      * @param benchmarkName - The name of the benchmark
+      */
+     onBenchmarkStart?: (benchmarkName: string) => void;
+     /**
+      * Called after each benchmark completes.
+      * @param benchmarkName - The name of the benchmark
+      * @param results - The results for the benchmark
+      */
+     onBenchmarkEnd?: (benchmarkName: string, results: Result[]) => void;
+     /**
+      * Called after all benchmarks complete.
+      * @param results - All benchmark results keyed by benchmark name
+      */
      report: (results: Record<string, Result[]>) => void;
  }
+ /**
+  * The strategy to use when an error occurs during a benchmark run.
+  */
+ export declare enum ErrorStrategy {
+     /**
+      * Continue running the benchmark. Errors will be collected and reported at the end. This is the default behavior.
+      */
+     Continue = "continue",
+     /**
+      * Abort the benchmark run immediately when an error occurs.
+      */
+     Abort = "abort",
+     /**
+      * Delay the error until the end of the benchmark run. This is useful when you want to see all the errors at once
+      */
+     DelayedThrow = "delayed-throw"
+ }
+ export declare class AggregateBenchmarkError extends Error {
+     results: Result[];
+     constructor(results: Result[]);
+ }
+ export declare class AggregateSuiteError extends Error {
+     results: Record<string, Result[]>;
+     constructor(results: Record<string, Result[]>);
+ }
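
The declarations above suggest the following usage pattern. This is a sketch under three assumptions: that `ErrorStrategy` and `AggregateBenchmarkError` are re-exported from the package root (the diff only shows them in `dist/api-types`), that `run()` returns a promise, and that `DelayedThrow` causes the run to reject with an `AggregateBenchmarkError` at the end, as the JSDoc implies. The public `errorStrategy` field it sets appears in the `benchmark.d.ts` hunk at the end of this diff:

```javascript
const {
  Benchmark,
  ErrorStrategy,
  AggregateBenchmarkError,
} = require('flexi-bench');

const benchmark = new Benchmark('Flaky Work', {
  iterations: 10,
  action: () => {
    if (Math.random() < 0.1) throw new Error('transient failure');
  },
});

// Collect all errors and throw a single aggregate at the end of the run
benchmark.errorStrategy = ErrorStrategy.DelayedThrow;

benchmark.run().catch((error) => {
  if (error instanceof AggregateBenchmarkError) {
    // Per-iteration errors are collected in each Result's `raw` array
    const failed = error.results.filter((r) => r.failed);
    console.error(`${failed.length} result(s) contained failures`);
  }
});
```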
package/dist/api-types.js CHANGED
@@ -1,2 +1,37 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AggregateSuiteError = exports.AggregateBenchmarkError = exports.ErrorStrategy = void 0;
+ /**
+  * The strategy to use when an error occurs during a benchmark run.
+  */
+ var ErrorStrategy;
+ (function (ErrorStrategy) {
+     /**
+      * Continue running the benchmark. Errors will be collected and reported at the end. This is the default behavior.
+      */
+     ErrorStrategy["Continue"] = "continue";
+     /**
+      * Abort the benchmark run immediately when an error occurs.
+      */
+     ErrorStrategy["Abort"] = "abort";
+     /**
+      * Delay the error until the end of the benchmark run. This is useful when you want to see all the errors at once
+      */
+     ErrorStrategy["DelayedThrow"] = "delayed-throw";
+ })(ErrorStrategy || (exports.ErrorStrategy = ErrorStrategy = {}));
+ class AggregateBenchmarkError extends Error {
+     results;
+     constructor(results) {
+         super('[AggregateBenchmarkError]: One or more benchmarks failed. Check the results for more information.');
+         this.results = results;
+     }
+ }
+ exports.AggregateBenchmarkError = AggregateBenchmarkError;
+ class AggregateSuiteError extends Error {
+     results;
+     constructor(results) {
+         super('[AggregateSuiteError]: One or more benchmarks failed. Check the results for more information.');
+         this.results = results;
+     }
+ }
+ exports.AggregateSuiteError = AggregateSuiteError;
@@ -23,7 +23,7 @@ function suite(name, fn) {
      activeSuite = suite;
      const transformed = fn(suite);
      activeSuite = null;
-     return (transformed !== null && transformed !== void 0 ? transformed : suite).run();
+     return (transformed ?? suite).run();
  }
  let activeBenchmark = null;
  /**
@@ -38,10 +38,10 @@ function benchmark(name, fn) {
      const transformed = fn(benchmark);
      activeBenchmark = null;
      if (activeSuite) {
-         activeSuite.addBenchmark(transformed !== null && transformed !== void 0 ? transformed : benchmark);
+         activeSuite.addBenchmark(transformed ?? benchmark);
      }
      else {
-         return (transformed !== null && transformed !== void 0 ? transformed : benchmark).run();
+         return (transformed ?? benchmark).run();
      }
  }
  let activeVariation = null;
@@ -57,10 +57,10 @@ function variation(name, fn) {
      const transformed = fn(variation);
      activeVariation = null;
      if (activeBenchmark) {
-         activeBenchmark.withVariation(transformed !== null && transformed !== void 0 ? transformed : variation);
+         activeBenchmark.withVariation(transformed ?? variation);
      }
      else if (activeSuite) {
-         activeSuite.withVariation(transformed !== null && transformed !== void 0 ? transformed : variation);
+         activeSuite.withVariation(transformed ?? variation);
      }
      else {
          throw new Error('`variation` must be called within a benchmark or suite');
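
The hunks above come from the compiled declarative wrappers: `suite(name, fn)` runs the suite after `fn` configures it, `benchmark` attaches itself to the enclosing suite (or runs standalone), and `variation` attaches to the enclosing benchmark or suite, throwing otherwise; a non-`undefined` return from `fn` replaces the implicit instance. A rough sketch of that style, assuming the helpers are exported from the package root and that `withContext` is available on the variation builder:

```javascript
const { suite, benchmark, variation } = require('flexi-bench');

suite('Fluent Suite', () => {
  benchmark('Process Data', (b) => {
    // Each variation call registers against the enclosing benchmark
    variation('loop', (v) => {
      v.withContext('impl', (data) => {
        /* loop implementation */
      });
    });
    variation('reduce', (v) => {
      v.withContext('impl', (data) => {
        /* reduce implementation */
      });
    });
    b.withIterations(100).withAction((v) => {
      const impl = v.get('impl');
      impl([/* test data */]);
    });
  });
});
```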
@@ -1,5 +1,5 @@
  import { Variation } from './variation';
- import { TeardownMethod, SetupMethod, Action, BenchmarkReporter } from './api-types';
+ import { TeardownMethod, SetupMethod, Action, BenchmarkReporter, ErrorStrategy } from './api-types';
  import { Result } from './results';
  import { PerformanceObserverOptions } from './performance-observer';
  import { BenchmarkBase } from './shared-api';
@@ -10,6 +10,7 @@ export declare class Benchmark extends BenchmarkBase {
      private timeout?;
      private reporter;
      private watcher?;
+     errorStrategy: ErrorStrategy;
      constructor(name: string, options?: {
          setup?: SetupMethod;
          teardown?: TeardownMethod;