@fuzdev/fuz_util 0.42.0 → 0.43.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/LICENSE +1 -1
  2. package/README.md +19 -12
  3. package/dist/async.d.ts +2 -2
  4. package/dist/async.d.ts.map +1 -1
  5. package/dist/async.js +2 -2
  6. package/dist/benchmark.d.ts +179 -0
  7. package/dist/benchmark.d.ts.map +1 -0
  8. package/dist/benchmark.js +400 -0
  9. package/dist/benchmark_baseline.d.ts +195 -0
  10. package/dist/benchmark_baseline.d.ts.map +1 -0
  11. package/dist/benchmark_baseline.js +415 -0
  12. package/dist/benchmark_format.d.ts +92 -0
  13. package/dist/benchmark_format.d.ts.map +1 -0
  14. package/dist/benchmark_format.js +327 -0
  15. package/dist/benchmark_stats.d.ts +112 -0
  16. package/dist/benchmark_stats.d.ts.map +1 -0
  17. package/dist/benchmark_stats.js +336 -0
  18. package/dist/benchmark_types.d.ts +174 -0
  19. package/dist/benchmark_types.d.ts.map +1 -0
  20. package/dist/benchmark_types.js +1 -0
  21. package/dist/library_json.d.ts +3 -3
  22. package/dist/library_json.d.ts.map +1 -1
  23. package/dist/library_json.js +1 -1
  24. package/dist/object.js +1 -1
  25. package/dist/stats.d.ts +126 -0
  26. package/dist/stats.d.ts.map +1 -0
  27. package/dist/stats.js +262 -0
  28. package/dist/time.d.ts +161 -0
  29. package/dist/time.d.ts.map +1 -0
  30. package/dist/time.js +260 -0
  31. package/dist/timings.d.ts +1 -7
  32. package/dist/timings.d.ts.map +1 -1
  33. package/dist/timings.js +16 -16
  34. package/package.json +21 -19
  35. package/src/lib/async.ts +3 -3
  36. package/src/lib/benchmark.ts +498 -0
  37. package/src/lib/benchmark_baseline.ts +573 -0
  38. package/src/lib/benchmark_format.ts +379 -0
  39. package/src/lib/benchmark_stats.ts +448 -0
  40. package/src/lib/benchmark_types.ts +197 -0
  41. package/src/lib/library_json.ts +3 -3
  42. package/src/lib/object.ts +1 -1
  43. package/src/lib/stats.ts +353 -0
  44. package/src/lib/time.ts +314 -0
  45. package/src/lib/timings.ts +17 -17
  46. package/src/lib/types.ts +2 -2
@@ -0,0 +1,448 @@
1
+ /**
2
+ * Benchmark-specific statistical analysis.
3
+ * Uses the general stats utilities from stats.ts for timing/performance analysis.
4
+ * All timing values are in nanoseconds.
5
+ */
6
+
7
+ import {TIME_NS_PER_SEC, time_format_adaptive} from './time.js';
8
+ import {
9
+ stats_mean,
10
+ stats_median,
11
+ stats_std_dev,
12
+ stats_percentile,
13
+ stats_cv,
14
+ stats_min_max,
15
+ stats_confidence_interval,
16
+ stats_outliers_mad,
17
+ } from './stats.js';
18
+
19
+ /**
20
+ * Minimal stats interface for comparison.
21
+ * This allows comparing stats from different sources (e.g., loaded baselines).
22
+ */
23
+ export interface BenchmarkStatsComparable {
24
+ mean_ns: number;
25
+ std_dev_ns: number;
26
+ sample_size: number;
27
+ confidence_interval_ns: [number, number];
28
+ }
29
+
30
+ /**
31
+ * Effect size magnitude interpretation (Cohen's d).
32
+ */
33
+ export type EffectMagnitude = 'negligible' | 'small' | 'medium' | 'large';
34
+
35
+ /**
36
+ * Result from comparing two benchmark stats.
37
+ */
38
+ export interface BenchmarkComparison {
39
+ /** Which benchmark is faster ('a', 'b', or 'equal' if difference is negligible) */
40
+ faster: 'a' | 'b' | 'equal';
41
+ /** How much faster the winner is (e.g., 1.5 means 1.5x faster) */
42
+ speedup_ratio: number;
43
+ /** Whether the difference is statistically significant at the given alpha */
44
+ significant: boolean;
45
+ /** P-value from Welch's t-test (lower = more confident the difference is real) */
46
+ p_value: number;
47
+ /** Cohen's d effect size (magnitude of difference independent of sample size) */
48
+ effect_size: number;
49
+ /** Interpretation of effect size */
50
+ effect_magnitude: EffectMagnitude;
51
+ /** Whether the 95% confidence intervals overlap */
52
+ ci_overlap: boolean;
53
+ /** Human-readable interpretation of the comparison */
54
+ recommendation: string;
55
+ }
56
+
57
+ /**
58
+ * Options for benchmark comparison.
59
+ */
60
+ export interface BenchmarkCompareOptions {
61
+ /** Significance level for hypothesis testing (default: 0.05) */
62
+ alpha?: number;
63
+ }
64
+
65
+ /**
66
+ * Complete statistical analysis of timing measurements.
67
+ * Includes outlier detection, descriptive statistics, and performance metrics.
68
+ * All timing values are in nanoseconds.
69
+ */
70
+ export class BenchmarkStats {
71
+ /** Mean (average) time in nanoseconds */
72
+ readonly mean_ns: number;
73
+ /** Median time in nanoseconds */
74
+ readonly median_ns: number;
75
+ /** Standard deviation in nanoseconds */
76
+ readonly std_dev_ns: number;
77
+ /** Minimum time in nanoseconds */
78
+ readonly min_ns: number;
79
+ /** Maximum time in nanoseconds */
80
+ readonly max_ns: number;
81
+ /** 75th percentile in nanoseconds */
82
+ readonly p75_ns: number;
83
+ /** 90th percentile in nanoseconds */
84
+ readonly p90_ns: number;
85
+ /** 95th percentile in nanoseconds */
86
+ readonly p95_ns: number;
87
+ /** 99th percentile in nanoseconds */
88
+ readonly p99_ns: number;
89
+ /** Coefficient of variation (std_dev / mean) */
90
+ readonly cv: number;
91
+ /** 95% confidence interval for the mean in nanoseconds */
92
+ readonly confidence_interval_ns: [number, number];
93
+ /** Array of detected outlier values in nanoseconds */
94
+ readonly outliers_ns: Array<number>;
95
+ /** Ratio of outliers to total samples */
96
+ readonly outlier_ratio: number;
97
+ /** Number of samples after outlier removal */
98
+ readonly sample_size: number;
99
+ /** Original number of samples (before outlier removal) */
100
+ readonly raw_sample_size: number;
101
+ /** Operations per second (TIME_NS_PER_SEC / mean_ns) */
102
+ readonly ops_per_second: number;
103
+ /** Number of failed iterations (NaN, Infinity, or negative values) */
104
+ readonly failed_iterations: number;
105
+
106
+ constructor(timings_ns: Array<number>) {
107
+ // Filter out invalid values (NaN, Infinity, negative)
108
+ const valid_timings: Array<number> = [];
109
+ let failed_count = 0;
110
+
111
+ for (const t of timings_ns) {
112
+ if (!isNaN(t) && isFinite(t) && t > 0) {
113
+ valid_timings.push(t);
114
+ } else {
115
+ failed_count++;
116
+ }
117
+ }
118
+
119
+ this.failed_iterations = failed_count;
120
+ this.raw_sample_size = timings_ns.length;
121
+
122
+ // If no valid timings, return empty stats
123
+ if (valid_timings.length === 0) {
124
+ this.mean_ns = NaN;
125
+ this.median_ns = NaN;
126
+ this.std_dev_ns = NaN;
127
+ this.min_ns = NaN;
128
+ this.max_ns = NaN;
129
+ this.p75_ns = NaN;
130
+ this.p90_ns = NaN;
131
+ this.p95_ns = NaN;
132
+ this.p99_ns = NaN;
133
+ this.cv = NaN;
134
+ this.confidence_interval_ns = [NaN, NaN];
135
+ this.outliers_ns = [];
136
+ this.outlier_ratio = 0;
137
+ this.sample_size = 0;
138
+ this.ops_per_second = 0;
139
+ return;
140
+ }
141
+
142
+ // Detect and remove outliers
143
+ const {cleaned, outliers} = stats_outliers_mad(valid_timings);
144
+ const sorted_cleaned = [...cleaned].sort((a, b) => a - b);
145
+
146
+ this.outliers_ns = outliers;
147
+ this.outlier_ratio = outliers.length / valid_timings.length;
148
+ this.sample_size = cleaned.length;
149
+
150
+ // Calculate statistics on cleaned data
151
+ this.mean_ns = stats_mean(cleaned);
152
+ this.median_ns = stats_median(sorted_cleaned);
153
+ this.std_dev_ns = stats_std_dev(cleaned, this.mean_ns);
154
+
155
+ const {min, max} = stats_min_max(sorted_cleaned);
156
+ this.min_ns = min;
157
+ this.max_ns = max;
158
+
159
+ this.p75_ns = stats_percentile(sorted_cleaned, 0.75);
160
+ this.p90_ns = stats_percentile(sorted_cleaned, 0.9);
161
+ this.p95_ns = stats_percentile(sorted_cleaned, 0.95);
162
+ this.p99_ns = stats_percentile(sorted_cleaned, 0.99);
163
+
164
+ this.cv = stats_cv(this.mean_ns, this.std_dev_ns);
165
+ this.confidence_interval_ns = stats_confidence_interval(cleaned);
166
+
167
+ // Calculate throughput (operations per second)
168
+ this.ops_per_second = this.mean_ns > 0 ? TIME_NS_PER_SEC / this.mean_ns : 0;
169
+ }
170
+
171
+ /**
172
+ * Format stats as a human-readable string.
173
+ */
174
+ toString(): string {
175
+ return `BenchmarkStats(mean=${time_format_adaptive(this.mean_ns)}, ops/sec=${this.ops_per_second.toFixed(2)}, cv=${(this.cv * 100).toFixed(1)}%, samples=${this.sample_size})`;
176
+ }
177
+ }
178
+
179
+ /**
180
+ * Compare two benchmark results for statistical significance.
181
+ * Uses Welch's t-test (handles unequal variances) and Cohen's d effect size.
182
+ *
183
+ * @param a - First benchmark stats (or any object with required properties)
184
+ * @param b - Second benchmark stats (or any object with required properties)
185
+ * @param options - Comparison options
186
+ * @returns Comparison result with significance, effect size, and recommendation
187
+ *
188
+ * @example
189
+ * ```ts
190
+ * const comparison = benchmark_stats_compare(result_a.stats, result_b.stats);
191
+ * if (comparison.significant) {
192
+ * console.log(`${comparison.faster} is ${comparison.speedup_ratio.toFixed(2)}x faster`);
193
+ * }
194
+ * ```
195
+ */
196
+ export const benchmark_stats_compare = (
197
+ a: BenchmarkStatsComparable,
198
+ b: BenchmarkStatsComparable,
199
+ options?: BenchmarkCompareOptions,
200
+ ): BenchmarkComparison => {
201
+ const alpha = options?.alpha ?? 0.05;
202
+
203
+ // Handle edge cases
204
+ if (a.sample_size === 0 || b.sample_size === 0) {
205
+ return {
206
+ faster: 'equal',
207
+ speedup_ratio: 1,
208
+ significant: false,
209
+ p_value: 1,
210
+ effect_size: 0,
211
+ effect_magnitude: 'negligible',
212
+ ci_overlap: true,
213
+ recommendation: 'Insufficient data for comparison',
214
+ };
215
+ }
216
+
217
+ // Calculate speedup ratio (lower time = faster, so compare by time not ops/sec)
218
+ const speedup_ratio = a.mean_ns < b.mean_ns ? b.mean_ns / a.mean_ns : a.mean_ns / b.mean_ns;
219
+ const faster: 'a' | 'b' | 'equal' =
220
+ a.mean_ns < b.mean_ns ? 'a' : a.mean_ns > b.mean_ns ? 'b' : 'equal';
221
+
222
+ // Welch's t-test (handles unequal variances)
223
+ // Special case: if both have zero variance, t-test is undefined
224
+ let p_value: number;
225
+ if (a.std_dev_ns === 0 && b.std_dev_ns === 0) {
226
+ // When there's no variance, any difference is 100% reliable (p=0) or identical (p=1)
227
+ p_value = a.mean_ns === b.mean_ns ? 1 : 0;
228
+ } else {
229
+ const {t_statistic, degrees_of_freedom} = welch_t_test(
230
+ a.mean_ns,
231
+ a.std_dev_ns,
232
+ a.sample_size,
233
+ b.mean_ns,
234
+ b.std_dev_ns,
235
+ b.sample_size,
236
+ );
237
+ // Calculate two-tailed p-value using t-distribution approximation
238
+ p_value = t_distribution_p_value(Math.abs(t_statistic), degrees_of_freedom);
239
+ }
240
+
241
+ // Cohen's d effect size
242
+ const pooled_std_dev = Math.sqrt(
243
+ ((a.sample_size - 1) * a.std_dev_ns ** 2 + (b.sample_size - 1) * b.std_dev_ns ** 2) /
244
+ (a.sample_size + b.sample_size - 2),
245
+ );
246
+
247
+ // When pooled_std_dev is 0 but means differ, effect is maximal (infinite)
248
+ // When means are equal, effect is 0
249
+ let effect_size: number;
250
+ let effect_magnitude: EffectMagnitude;
251
+
252
+ if (pooled_std_dev === 0) {
253
+ // Zero variance case - if means differ, it's a definitive difference
254
+ if (a.mean_ns === b.mean_ns) {
255
+ effect_size = 0;
256
+ effect_magnitude = 'negligible';
257
+ } else {
258
+ // Any difference is 100% reliable when there's no variance
259
+ effect_size = Infinity;
260
+ effect_magnitude = 'large';
261
+ }
262
+ } else {
263
+ effect_size = Math.abs(a.mean_ns - b.mean_ns) / pooled_std_dev;
264
+ // Interpret effect size (Cohen's conventions)
265
+ effect_magnitude =
266
+ effect_size < 0.2
267
+ ? 'negligible'
268
+ : effect_size < 0.5
269
+ ? 'small'
270
+ : effect_size < 0.8
271
+ ? 'medium'
272
+ : 'large';
273
+ }
274
+
275
+ // Check confidence interval overlap
276
+ const ci_overlap =
277
+ a.confidence_interval_ns[0] <= b.confidence_interval_ns[1] &&
278
+ b.confidence_interval_ns[0] <= a.confidence_interval_ns[1];
279
+
280
+ // Determine significance
281
+ const significant = p_value < alpha;
282
+
283
+ // Generate recommendation
284
+ let recommendation: string;
285
+ if (!significant) {
286
+ recommendation =
287
+ effect_magnitude === 'negligible'
288
+ ? 'No meaningful difference detected'
289
+ : `Difference not statistically significant (p=${p_value.toFixed(3)}), but effect size suggests ${effect_magnitude} practical difference`;
290
+ } else if (effect_magnitude === 'negligible') {
291
+ recommendation = `Statistically significant but negligible practical difference (${speedup_ratio.toFixed(2)}x)`;
292
+ } else {
293
+ recommendation = `${faster === 'a' ? 'First' : 'Second'} is ${speedup_ratio.toFixed(2)}x faster with ${effect_magnitude} effect size (p=${p_value.toFixed(3)})`;
294
+ }
295
+
296
+ // Adjust 'faster' to 'equal' if effect is negligible
297
+ const adjusted_faster = effect_magnitude === 'negligible' ? 'equal' : faster;
298
+
299
+ return {
300
+ faster: adjusted_faster,
301
+ speedup_ratio,
302
+ significant,
303
+ p_value,
304
+ effect_size,
305
+ effect_magnitude,
306
+ ci_overlap,
307
+ recommendation,
308
+ };
309
+ };
310
+
311
+ /**
312
+ * Calculate Welch's t-test statistic and degrees of freedom.
313
+ * Welch's t-test is more robust than Student's t-test when variances are unequal.
314
+ */
315
+ const welch_t_test = (
316
+ mean1: number,
317
+ std1: number,
318
+ n1: number,
319
+ mean2: number,
320
+ std2: number,
321
+ n2: number,
322
+ ): {t_statistic: number; degrees_of_freedom: number} => {
323
+ const var1 = std1 ** 2;
324
+ const var2 = std2 ** 2;
325
+
326
+ const se1 = var1 / n1;
327
+ const se2 = var2 / n2;
328
+
329
+ const t_statistic = (mean1 - mean2) / Math.sqrt(se1 + se2);
330
+
331
+ // Welch-Satterthwaite degrees of freedom
332
+ const numerator = (se1 + se2) ** 2;
333
+ const denominator = se1 ** 2 / (n1 - 1) + se2 ** 2 / (n2 - 1);
334
+ const degrees_of_freedom = numerator / denominator;
335
+
336
+ return {t_statistic, degrees_of_freedom};
337
+ };
338
+
339
+ /**
340
+ * Approximate p-value from t-distribution using the approximation formula.
341
+ * This avoids requiring a full t-distribution table or library.
342
+ * For large df (>30), this approximation is very accurate.
343
+ */
344
+ const t_distribution_p_value = (t: number, df: number): number => {
345
+ // Use normal approximation for large df
346
+ if (df > 100) {
347
+ // Standard normal CDF approximation
348
+ return 2 * (1 - normal_cdf(t));
349
+ }
350
+
351
+ // For smaller df, use a more accurate approximation
352
+ // Based on the incomplete beta function relationship
353
+ const x = df / (df + t * t);
354
+ const a = df / 2;
355
+ const b = 0.5;
356
+
357
+ // Approximation of regularized incomplete beta function
358
+ // This is accurate to about 4 decimal places for typical use cases
359
+ const beta_approx = incomplete_beta_approx(x, a, b);
360
+ return beta_approx;
361
+ };
362
+
363
+ /**
364
+ * Standard normal CDF approximation (Abramowitz and Stegun formula 7.1.26).
365
+ */
366
+ const normal_cdf = (x: number): number => {
367
+ const t = 1 / (1 + 0.2316419 * Math.abs(x));
368
+ const d = 0.3989423 * Math.exp((-x * x) / 2);
369
+ const p =
370
+ d * t * (0.3193815 + t * (-0.3565638 + t * (1.781478 + t * (-1.821256 + t * 1.330274))));
371
+ return x > 0 ? 1 - p : p;
372
+ };
373
+
374
+ /**
375
+ * Approximate regularized incomplete beta function for p-value calculation.
376
+ * Uses continued fraction expansion for reasonable accuracy.
377
+ */
378
+ const incomplete_beta_approx = (x: number, a: number, b: number): number => {
379
+ // Lentz-style continued fraction evaluation of the regularized incomplete beta
380
+ // For our use case (t-distribution p-values), this provides sufficient accuracy
381
+ if (x <= 0) return 0;
382
+ if (x >= 1) return 1;
383
+
384
+ // Use symmetry if needed
385
+ if (x > (a + 1) / (a + b + 2)) {
386
+ return 1 - incomplete_beta_approx(1 - x, b, a);
387
+ }
388
+
389
+ // Continued fraction approximation (first few terms)
390
+ const lnBeta = ln_gamma(a) + ln_gamma(b) - ln_gamma(a + b);
391
+ const front = Math.exp(Math.log(x) * a + Math.log(1 - x) * b - lnBeta) / a;
392
+
393
+ // Simple continued fraction (limited iterations for speed)
394
+ let f = 1;
395
+ let c = 1;
396
+ let d = 0;
397
+
398
+ for (let m = 1; m <= 100; m++) {
399
+ const m2 = 2 * m;
400
+
401
+ // Even step
402
+ let aa = (m * (b - m) * x) / ((a + m2 - 1) * (a + m2));
403
+ d = 1 + aa * d;
404
+ if (Math.abs(d) < 1e-30) d = 1e-30;
405
+ c = 1 + aa / c;
406
+ if (Math.abs(c) < 1e-30) c = 1e-30;
407
+ d = 1 / d;
408
+ f *= d * c;
409
+
410
+ // Odd step
411
+ aa = (-(a + m) * (a + b + m) * x) / ((a + m2) * (a + m2 + 1));
412
+ d = 1 + aa * d;
413
+ if (Math.abs(d) < 1e-30) d = 1e-30;
414
+ c = 1 + aa / c;
415
+ if (Math.abs(c) < 1e-30) c = 1e-30;
416
+ d = 1 / d;
417
+ const delta = d * c;
418
+ f *= delta;
419
+
420
+ if (Math.abs(delta - 1) < 1e-8) break;
421
+ }
422
+
423
+ return front * f;
424
+ };
425
+
426
+ /**
427
+ * Log gamma function approximation (Lanczos approximation).
428
+ */
429
+ const ln_gamma = (z: number): number => {
430
+ const g = 7;
431
+ const c = [
432
+ 0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313,
433
+ -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6,
434
+ 1.5056327351493116e-7,
435
+ ];
436
+
437
+ if (z < 0.5) {
438
+ return Math.log(Math.PI / Math.sin(Math.PI * z)) - ln_gamma(1 - z);
439
+ }
440
+
441
+ const z_adj = z - 1;
442
+ let x = c[0]!;
443
+ for (let i = 1; i < g + 2; i++) {
444
+ x += c[i]! / (z_adj + i);
445
+ }
446
+ const t = z_adj + g + 0.5;
447
+ return 0.5 * Math.log(2 * Math.PI) + (z_adj + 0.5) * Math.log(t) - t + Math.log(x);
448
+ };
@@ -0,0 +1,197 @@
1
+ import type {BenchmarkStats} from './benchmark_stats.js';
2
+ import type {Timer} from './time.js';
3
+
4
+ /**
5
+ * Configuration options for a benchmark suite.
6
+ */
7
+ export interface BenchmarkConfig {
8
+ /**
9
+ * Target duration to run each benchmark task in milliseconds.
10
+ * The benchmark will run until this duration is reached or max_iterations is hit.
11
+ * Default: 1000ms
12
+ */
13
+ duration_ms?: number;
14
+
15
+ /**
16
+ * Number of warmup iterations before actual measurements.
17
+ * Warmup helps stabilize JIT compilation and caches.
18
+ * Default: 5
19
+ */
20
+ warmup_iterations?: number;
21
+
22
+ /**
23
+ * Cooldown time between tasks in milliseconds.
24
+ * Helps prevent interference between benchmarks.
25
+ * Default: 100ms
26
+ */
27
+ cooldown_ms?: number;
28
+
29
+ /**
30
+ * Minimum number of iterations to run.
31
+ * Default: 10
32
+ */
33
+ min_iterations?: number;
34
+
35
+ /**
36
+ * Maximum number of iterations to run.
37
+ * Prevents infinite loops if function is extremely fast.
38
+ * Default: 100000
39
+ */
40
+ max_iterations?: number;
41
+
42
+ /**
43
+ * Custom timer to use for measurements.
44
+ * Default: timer_default (auto-detects environment)
45
+ */
46
+ timer?: Timer;
47
+
48
+ /**
49
+ * Callback invoked after each iteration completes.
50
+ * Useful for triggering garbage collection, logging progress, early termination,
51
+ * or custom instrumentation.
52
+ *
53
+ * **Note**: The callback time is NOT included in iteration measurements - it runs
54
+ * after the timing capture. However, frequent GC calls will slow overall benchmark
55
+ * execution time.
56
+ *
57
+ * @param task_name - Name of the current task being benchmarked
58
+ * @param iteration - Current iteration number (1-indexed)
59
+ * @param abort - Call to stop the benchmark early for this task
60
+ *
61
+ * @example
62
+ * ```ts
63
+ * // Trigger GC between iterations (run node with --expose-gc)
64
+ * new Benchmark({
65
+ * on_iteration: () => {
66
+ * if (globalThis.gc) globalThis.gc();
67
+ * }
68
+ * })
69
+ *
70
+ * // Log progress for long-running benchmarks
71
+ * new Benchmark({
72
+ * on_iteration: (name, iteration) => {
73
+ * if (iteration % 1000 === 0) {
74
+ * console.log(`${name}: ${iteration} iterations`);
75
+ * }
76
+ * }
77
+ * })
78
+ *
79
+ * // Stop early when converged
80
+ * new Benchmark({
81
+ * on_iteration: (name, iteration, abort) => {
82
+ * if (iteration > 1000 && has_stabilized()) abort();
83
+ * }
84
+ * })
85
+ * ```
86
+ */
87
+ on_iteration?: (task_name: string, iteration: number, abort: () => void) => void;
88
+
89
+ /**
90
+ * Callback invoked after each task completes.
91
+ * Useful for logging progress during long benchmark runs.
92
+ *
93
+ * @param result - The completed benchmark result
94
+ * @param index - Zero-based index of the completed task
95
+ * @param total - Total number of tasks to run
96
+ *
97
+ * @example
98
+ * ```ts
99
+ * new Benchmark({
100
+ * on_task_complete: (result, index, total) => {
101
+ * console.log(`[${index + 1}/${total}] ${result.name}: ${result.stats.ops_per_second.toFixed(0)} ops/sec`);
102
+ * }
103
+ * })
104
+ * ```
105
+ */
106
+ on_task_complete?: (result: BenchmarkResult, index: number, total: number) => void;
107
+ }
108
+
109
+ /**
110
+ * A benchmark task to execute.
111
+ */
112
+ export interface BenchmarkTask {
113
+ /** Name of the task (for display) */
114
+ name: string;
115
+
116
+ /** Function to benchmark (sync or async). Return values are ignored. */
117
+ fn: () => unknown;
118
+
119
+ /**
120
+ * Optional setup function run before benchmarking this task.
121
+ * Not included in timing measurements.
122
+ */
123
+ setup?: () => void | Promise<void>;
124
+
125
+ /**
126
+ * Optional teardown function run after benchmarking this task.
127
+ * Not included in timing measurements.
128
+ */
129
+ teardown?: () => void | Promise<void>;
130
+
131
+ /**
132
+ * If true, skip this task during benchmark runs.
133
+ * Useful for temporarily disabling tasks during development.
134
+ */
135
+ skip?: boolean;
136
+
137
+ /**
138
+ * If true, run only this task (and other tasks marked `only`).
139
+ * Useful for focusing on specific tasks during development.
140
+ */
141
+ only?: boolean;
142
+
143
+ /**
144
+ * Hint for whether the function is sync or async.
145
+ * If not provided, automatically detected during warmup.
146
+ * Setting this explicitly skips per-iteration promise checking for sync functions.
147
+ */
148
+ async?: boolean;
149
+ }
150
+
151
+ /**
152
+ * Result from running a single benchmark task.
153
+ */
154
+ export interface BenchmarkResult {
155
+ /** Task name */
156
+ name: string;
157
+
158
+ /** Statistical analysis of the benchmark */
159
+ stats: BenchmarkStats;
160
+
161
+ /** Number of iterations executed */
162
+ iterations: number;
163
+
164
+ /** Total time spent benchmarking (including warmup) in milliseconds */
165
+ total_time_ms: number;
166
+
167
+ /**
168
+ * Raw timing data for each iteration in nanoseconds.
169
+ * Useful for custom statistical analysis, histogram generation,
170
+ * or exporting to external tools.
171
+ */
172
+ timings_ns: Array<number>;
173
+ }
174
+
175
+ /**
176
+ * Options for table formatting.
177
+ */
178
+ export interface BenchmarkFormatTableOptions {
179
+ /**
180
+ * Group results by category using filter functions.
181
+ */
182
+ groups?: Array<BenchmarkGroup>;
183
+ }
184
+
185
+ /**
186
+ * A group definition for organizing benchmark results.
187
+ */
188
+ export interface BenchmarkGroup {
189
+ /** Display name for the group */
190
+ name: string;
191
+
192
+ /** Optional description shown below the group name */
193
+ description?: string;
194
+
195
+ /** Filter function to determine which results belong to this group */
196
+ filter: (result: BenchmarkResult) => boolean;
197
+ }
@@ -13,11 +13,11 @@ import type {Url} from './url.js';
13
13
  export interface LibraryJson {
14
14
  package_json: PackageJson;
15
15
  source_json: SourceJson;
16
- /** Package name, e.g. `@ryanatkn/fuz`. */
16
+ /** Package name, e.g. `@fuzdev/fuz_ui`. */
17
17
  name: string;
18
18
  /** Name without scope, e.g. `fuz`. */
19
19
  repo_name: string;
20
- /** GitHub repo URL, e.g. `https://github.com/ryanatkn/fuz`. */
20
+ /** GitHub repo URL, e.g. `https://github.com/fuzdev/fuz_ui`. */
21
21
  repo_url: Url;
22
22
  /** GitHub user/org, e.g. `ryanatkn`. */
23
23
  owner_name: string | null;
@@ -95,7 +95,7 @@ export const library_json_parse = (
95
95
  };
96
96
 
97
97
  /**
98
- * Extracts repo name from a package name, e.g. `@ryanatkn/fuz` → `fuz`.
98
+ * Extracts repo name from a package name, e.g. `@fuzdev/fuz_ui` → `fuz_ui`.
99
99
  */
100
100
  export const library_repo_name_parse = (name: string): string => {
101
101
  if (name[0] === '@') {
package/src/lib/object.ts CHANGED
@@ -85,7 +85,7 @@ export const reorder = <T extends Record<K, any>, K extends string | number>(
85
85
  /**
86
86
  * Frozen empty object with no properties, good for options default values.
87
87
  */
88
- export const EMPTY_OBJECT: Record<string | number | symbol, undefined> & object = Object.freeze({}); // eslint-disable-line @typescript-eslint/no-redundant-type-constituents
88
+ export const EMPTY_OBJECT: Record<string | number | symbol, undefined> & object = Object.freeze({});
89
89
 
90
90
  /**
91
91
  * Performs a depth-first traversal of an object's enumerable properties,